diff --git a/.all-contributorsrc b/.all-contributorsrc index 35f26df90327..21353af60c47 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -1057,6 +1057,24 @@ "contributions": [ "code" ] + }, + { + "login": "jencymaryjoseph", + "name": "Jency Joseph", + "avatar_url": "https://avatars.githubusercontent.com/u/35571282?v=4", + "profile": "https://github.com/jencymaryjoseph", + "contributions": [ + "code" + ] + }, + { + "login": "reifiedbeans", + "name": "Drew Davis", + "avatar_url": "https://avatars.githubusercontent.com/u/9686215?v=4", + "profile": "https://github.com/reifiedbeans", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.changes/2.31.40.json b/.changes/2.31.40.json new file mode 100644 index 000000000000..a2e10f51333c --- /dev/null +++ b/.changes/2.31.40.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.40", + "date": "2025-05-09", + "entries": [ + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Minor API documentation updates" + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "We are pleased to announce limit increases to our grok processor logs transformation feature. Now you can define 20 Grok patterns in their configurations, with an expanded total pattern matching limit of 512 characters." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Remove parameter EnableWorkDocs from WorkSpacesServiceModel due to end of support of Amazon WorkDocs service." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Add support to retry a canary automatically after schedule run failures. Users can enable this feature by configuring the RetryConfig field when calling the CreateCanary or UpdateCanary API. Also includes changes in GetCanary and GetCanaryRuns to support retrieving retry configurations." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.41.json b/.changes/2.31.41.json new file mode 100644 index 000000000000..730e4a664c41 --- /dev/null +++ b/.changes/2.31.41.json @@ -0,0 +1,66 @@ +{ + "version": "2.31.41", + "date": "2025-05-12", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add `@Mutable` and `@NotThreadSafe` to model Builders" + }, + { + "type": "feature", + "category": "AWSDeadlineCloud", + "contributor": "", + "description": "AWS Deadline Cloud service-managed fleets now support configuration scripts. Configuration scripts make it easy to install additional software, like plugins and packages, onto a worker." + }, + { + "type": "feature", + "category": "AWS Elemental MediaLive", + "contributor": "", + "description": "Add support to the AV1 rate control mode" + }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "Updating the endpoint list for the Identity and access management (IAM) service" + }, + { + "type": "feature", + "category": "AWS MediaTailor", + "contributor": "", + "description": "Documenting that EnabledLoggingStrategies is always present in responses of PlaybackConfiguration read operations." + }, + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Updates to support S3 Express zonal endpoints for directory buckets in AWS CLI" + }, + { + "type": "feature", + "category": "AWS Supply Chain", + "contributor": "", + "description": "Launch new AWS Supply Chain public APIs for DataIntegrationEvent, DataIntegrationFlowExecution and DatasetNamespace. 
Also add more capabilities to existing public APIs to support direct dataset event publish, data deduplication in DataIntegrationFlow, partition specification of custom datasets." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "EC2 - Adding support for AvailabilityZoneId" + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "No API changes from previous release. This release migrated the model to Smithy keeping all features unchanged." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.42.json b/.changes/2.31.42.json new file mode 100644 index 000000000000..d80760740fca --- /dev/null +++ b/.changes/2.31.42.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.42", + "date": "2025-05-13", + "entries": [ + { + "type": "feature", + "category": "AWS Control Tower", + "contributor": "", + "description": "AWS Control Tower now reports the inheritance drift status for EnabledBaselines through the GetEnabledBaseline and ListEnabledBaselines APIs. You can now filter EnabledBaselines by their enablement and drift status using the ListEnabledBaselines API to view accounts and OUs that require attention." + }, + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "Add Tagging feature to resources in the Managed Entitlements service. License and Grant resources can now be tagged." + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock Runtime", + "contributor": "", + "description": "Changes for enhanced metadata in trace" + }, + { + "type": "feature", + "category": "Amazon Aurora DSQL", + "contributor": "", + "description": "CreateMultiRegionClusters and DeleteMultiRegionClusters APIs marked as deprecated. 
Introduced new multi-Region clusters creation experience through multiRegionProperties parameter in CreateCluster API." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "Enable cross-Region inference for Amazon Bedrock Guardrails by using the crossRegionConfig parameter when calling the CreateGuardrail or UpdateGuardrail operation." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This release extends functionality for Amazon EBS volumes attached to Amazon ECS tasks by adding support for the new EBS volumeInitializationRate parameter in ECS RunTask/StartTask/CreateService/UpdateService APIs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.43.json b/.changes/2.31.43.json new file mode 100644 index 000000000000..79c512337aad --- /dev/null +++ b/.changes/2.31.43.json @@ -0,0 +1,30 @@ +{ + "version": "2.31.43", + "date": "2025-05-14", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This update enables cropping for video overlays and adds a new STL to Teletext upconversion toggle to preserve styling." + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "This release adds a new API \"ListLogGroups\" and an improvement in API \"DescribeLogGroups\"" + }, + { + "type": "feature", + "category": "Amazon Cognito Identity Provider", + "contributor": "", + "description": "Add exceptions to WebAuthn operations." + }, + { + "type": "feature", + "category": "Amazon Kinesis Firehose", + "contributor": "", + "description": "This release adds catalogARN support for s3 tables multi-catalog catalogARNs." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.44.json b/.changes/2.31.44.json new file mode 100644 index 000000000000..ae8bd0a04f12 --- /dev/null +++ b/.changes/2.31.44.json @@ -0,0 +1,48 @@ +{ + "version": "2.31.44", + "date": "2025-05-15", + "entries": [ + { + "type": "feature", + "category": "AWS CodeBuild", + "contributor": "", + "description": "AWS CodeBuild now supports Docker Server capability" + }, + { + "type": "feature", + "category": "AWS Control Tower", + "contributor": "", + "description": "Updated the descriptions for the AWS Control Tower Baseline APIs to make them more intuitive." + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Introduces Data Resync feature to describe-table-statistics and IAM database authentication for MariaDB, MySQL, and PostgreSQL." + }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "This release adds support for Slurm accounting. For more information, see the Slurm accounting topic in the AWS PCS User Guide. Slurm accounting is supported for Slurm 24.11 and later. This release also adds 24.11 as a valid value for the version parameter of the Scheduler data type." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Small optimization for endpoint rules. Lazily compile the region pattern instead of parsing it every time. This will pay the penalty of parsing it just once at the cost of using a bit more of memory to keep the parsed pattern." + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock", + "contributor": "", + "description": "Amazon Bedrock Flows introduces DoWhile loops nodes, parallel node executions, and enhancements to knowledge base nodes." 
+ }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Added the new AlwaysOn running mode for WorkSpaces Pools. Customers can now choose between AlwaysOn (for instant access, with hourly usage billing regardless of connection status), or AutoStop (to optimize cost, with a brief startup delay) for their pools." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.45.json b/.changes/2.31.45.json new file mode 100644 index 000000000000..e50da33569f9 --- /dev/null +++ b/.changes/2.31.45.json @@ -0,0 +1,66 @@ +{ + "version": "2.31.45", + "date": "2025-05-16", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix a regression for the JSON REST protocol for which a structure explicit payload member was set to the empty object instead of null" + }, + { + "type": "feature", + "category": "AWS CodePipeline", + "contributor": "", + "description": "CodePipeline now supports new API ListDeployActionExecutionTargets that lists the deployment target details for deploy action executions." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Changes include (1) Excel as S3 Source type and XML and Tableau's Hyper as S3 Sink types, (2) targeted number of partitions parameter in S3 sinks and (3) new compression types in CSV/JSON and Parquet S3 sinks." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This is an Amazon ECS documentation only release to support the change of the container exit \"reason\" field from 255 characters to 1024 characters." 
+ }, + { + "type": "feature", + "category": "Amazon EMR", + "contributor": "", + "description": "Added APIs for managing Application UIs: Access Persistent (serverless) UIs via CreatePersistentAppUI DescribePersistentAppUI & GetPersistentAppUIPresignedURL, and Cluster-based UIs through GetOnClusterAppUIPresignedURL. Supports Yarn, Spark History, and TEZ interfaces." + }, + { + "type": "feature", + "category": "Amazon Neptune", + "contributor": "", + "description": "This release adds Global Cluster Switchover capability which enables you to change your global cluster's primary AWS Region, the region that serves writes, while preserving the replication between all regions in the global cluster." + }, + { + "type": "feature", + "category": "Data Automation for Amazon Bedrock", + "contributor": "", + "description": "Add support for VIDEO modality to BlueprintType enum." + }, + { + "type": "feature", + "category": "Runtime for Amazon Bedrock Data Automation", + "contributor": "", + "description": "Add AssetProcessingConfiguration for video segment to InputConfiguration" + }, + { + "type": "feature", + "category": "Service Quotas", + "contributor": "", + "description": "This release introduces CreateSupportCase operation to SDK." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.46.json b/.changes/2.31.46.json new file mode 100644 index 000000000000..ee1c6a4e5234 --- /dev/null +++ b/.changes/2.31.46.json @@ -0,0 +1,30 @@ +{ + "version": "2.31.46", + "date": "2025-05-19", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaPackage v2", + "contributor": "", + "description": "This release adds support for DVB-DASH, EBU-TT-D subtitle format, and non-compacted manifests for DASH in MediaPackage v2 Origin Endpoints." 
+ }, + { + "type": "feature", + "category": "Amazon Aurora DSQL", + "contributor": "", + "description": "CreateMultiRegionCluster and DeleteMultiRegionCluster APIs removed" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release includes new APIs for System Integrity Protection (SIP) configuration and automated root volume ownership delegation for EC2 Mac instances." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.47.json b/.changes/2.31.47.json new file mode 100644 index 000000000000..8594a937b1f1 --- /dev/null +++ b/.changes/2.31.47.json @@ -0,0 +1,48 @@ +{ + "version": "2.31.47", + "date": "2025-05-20", + "entries": [ + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "Remove Discovery APIs from the DataSync service" + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "Enhanced AWS Glue ListConnectionTypes API Model with additional metadata fields." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release expands the ModifyInstanceMaintenanceOptions API to enable or disable instance migration during customer-initiated reboots for EC2 Scheduled Reboot Events." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "This release introduces the new DescribeDBMajorEngineVersions API for describing the properties of specific major versions of database engines." 
+ }, + { + "type": "feature", + "category": "CloudWatch Observability Access Manager", + "contributor": "", + "description": "Add IncludeTags field to GetLink, GetSink and UpdateLink API" + }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "This release adds GetClustersForImage API and filter updates as part of the mapping of container images to running containers feature." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.48.json b/.changes/2.31.48.json new file mode 100644 index 000000000000..30eedc8e6013 --- /dev/null +++ b/.changes/2.31.48.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.48", + "date": "2025-05-21", + "entries": [ + { + "type": "feature", + "category": "Agents for Amazon Bedrock Runtime", + "contributor": "", + "description": "Amazon Bedrock introduces asynchronous flows (in preview), which let you run flows for longer durations and yield control so that your application can perform other tasks and you don't have to actively monitor the flow's progress." + }, + { + "type": "feature", + "category": "Amazon CloudWatch", + "contributor": "", + "description": "Adds support for setting up Contributor Insight rules on logs transformed via Logs Transformation feature." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Release of Dualstack and Ipv6-only EC2 Public DNS hostnames" + }, + { + "type": "feature", + "category": "Application Auto Scaling", + "contributor": "", + "description": "Doc only update that addresses a customer reported issue." + }, + { + "type": "feature", + "category": "Partner Central Selling API", + "contributor": "", + "description": "Modified validation to allow expectedCustomerSpend array with zero elements in Partner Opportunity operations." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.49.json b/.changes/2.31.49.json new file mode 100644 index 000000000000..20f0ba284e05 --- /dev/null +++ b/.changes/2.31.49.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.49", + "date": "2025-05-22", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Update non-streaming error unmarshalling to properly unmarshall exceptions to their expected types." + }, + { + "type": "feature", + "category": "AWS Audit Manager", + "contributor": "", + "description": "With this release, the AssessmentControl description field has been deprecated, as of May 19, 2025. Additionally, the UpdateAssessment API can now return a ServiceQuotaExceededException when applicable service quotas are exceeded." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "This release supports additional ConversionSpec parameter as part of IntegrationPartition Structure in CreateIntegrationTableProperty API. 
This parameter is referred to apply appropriate column transformation for columns that are used for timestamp based partitioning" + }, + { + "type": "feature", + "category": "Amazon Aurora DSQL", + "contributor": "", + "description": "Features: support for customer managed encryption keys" + }, + { + "type": "feature", + "category": "Amazon Prometheus Service", + "contributor": "", + "description": "Add QueryLoggingConfiguration APIs for Amazon Managed Prometheus" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.50.json b/.changes/2.31.50.json new file mode 100644 index 000000000000..543efe927680 --- /dev/null +++ b/.changes/2.31.50.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.50", + "date": "2025-05-23", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix CompletableFuture hanging when RetryStrategy/MetricsCollector raise errors" + }, + { + "type": "bugfix", + "category": "Netty NIO HTTP Client", + "contributor": "", + "description": "Enable Netty HTTP header validation when connecting with proxy" + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for the C7i-flex, M7i-flex, I7i, I7ie, I8g, P6-b200, Trn2, C8gd, M8gd and R8gd instances" + }, + { + "type": "feature", + "category": "Security Incident Response", + "contributor": "", + "description": "Update PrincipalId pattern documentation to reflect what user should receive back from the API call" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.51.json b/.changes/2.31.51.json new file mode 100644 index 000000000000..643c131efbcb --- /dev/null +++ b/.changes/2.31.51.json @@ -0,0 +1,24 @@ +{ + "version": "2.31.51", + "date": "2025-05-27", + "entries": [ + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "This release introduces Cost Comparison feature (GetCostAndUsageComparisons, GetCostComparisonDrivers) allowing you find cost variations across multiple dimensions and identify key drivers of spending changes." + }, + { + "type": "feature", + "category": "AWSDeadlineCloud", + "contributor": "", + "description": "AWS Deadline Cloud service-managed fleets now support storage profiles. With storage profiles, you can map file paths between a workstation and the worker hosts running the job." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds three features - option to store AWS Site-to-Site VPN pre-shared keys in AWS Secrets Manager, GetActiveVpnTunnelStatus API to check the in-use VPN algorithms, and SampleType option in GetVpnConnectionDeviceSampleConfiguration API to get recommended sample configs for VPN devices." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.52.json b/.changes/2.31.52.json new file mode 100644 index 000000000000..a2a40faf30d8 --- /dev/null +++ b/.changes/2.31.52.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.52", + "date": "2025-05-28", + "entries": [ + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now use VPC endpoint associations to create multiple firewall endpoints for a single firewall." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs)" + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Allow for more than 2 characters for location codes in EventBridge ARNs" + }, + { + "type": "feature", + "category": "Cost Optimization Hub", + "contributor": "", + "description": "This release allows customers to modify their preferred commitment term and payment options." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Add support to change ephemeral storage. Add a new field \"TestResult\" under CanaryRunStatus." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.53.json b/.changes/2.31.53.json new file mode 100644 index 000000000000..2086c9b4320a --- /dev/null +++ b/.changes/2.31.53.json @@ -0,0 +1,78 @@ +{ + "version": "2.31.53", + "date": "2025-05-29", + "entries": [ + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Add support for customizable build instance sizes. CreateApp and UpdateApp operations now accept a new JobConfig parameter composed of BuildComputeType." + }, + { + "type": "feature", + "category": "AWS Billing and Cost Management Pricing Calculator", + "contributor": "", + "description": "Add AFTER_DISCOUNTS_AND_COMMITMENTS to Workload Estimate Rate Type. Set ListWorkLoadEstimateUsage maxResults range to minimum of 0 and maximum of 300." 
+ }, + { + "type": "feature", + "category": "AWS CloudTrail", + "contributor": "", + "description": "CloudTrail Feature Release: Support for Enriched Events with Configurable Context for Event Data Store" + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "contributor": "", + "description": "This release adds Tag support for Event Action resource, through which customers can create event actions with Tags and retrieve event actions with Tags." + }, + { + "type": "feature", + "category": "AWS DataSync", + "contributor": "", + "description": "AgentArns field is made optional for Object Storage and Azure Blob location create requests. Location credentials are now managed via Secrets Manager, and may be encrypted with service managed or customer managed keys. Authentication is now optional for Azure Blob locations." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "Amazon Connect Service Feature: Email Recipient Limit Increase" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "FSx API changes to support the public launch of new Intelligent Tiering storage class on Amazon FSx for Lustre" + }, + { + "type": "feature", + "category": "Amazon Interactive Video Service RealTime", + "contributor": "", + "description": "IVS Real-Time now offers customers the participant replication that allow customers to copy a participant from one stage to another." + }, + { + "type": "feature", + "category": "AmazonMWAA", + "contributor": "", + "description": "Amazon MWAA now lets you choose a worker replacement strategy when updating an environment. This release adds two worker replacement strategies: FORCED (default), which stops workers immediately, and GRACEFUL, which allows workers to finish current tasks before shutting down." 
+ }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Add maintenance status field to DescribeMlflowTrackingServer API response" + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Adding checksum support for S3 PutBucketOwnershipControls API." + }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Add support for \"apple\" CpuManufacturer in ABIS" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.54.json b/.changes/2.31.54.json new file mode 100644 index 000000000000..e2adfa5957b7 --- /dev/null +++ b/.changes/2.31.54.json @@ -0,0 +1,24 @@ +{ + "version": "2.31.54", + "date": "2025-05-30", + "entries": [ + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Release new parameter CapacityReservationConfig in ProductionVariant" + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds the capability for users to specify an optional Execution IAM policy in the StartJobRun action. The resulting permissions assumed by the job run is the intersection of the permissions in the Execution Role and the specified Execution IAM Policy." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.55.json b/.changes/2.31.55.json new file mode 100644 index 000000000000..6fd8f18cb2cf --- /dev/null +++ b/.changes/2.31.55.json @@ -0,0 +1,78 @@ +{ + "version": "2.31.55", + "date": "2025-06-02", + "entries": [ + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "You can now subscribe to Amazon SNS notifications and Amazon EventBridge events for backup indexing. 
You can now receive notifications when a backup index is created, deleted, or fails to create, enhancing your ability to monitor and track your backup operations." + }, + { + "type": "feature", + "category": "AWS Compute Optimizer", + "contributor": "", + "description": "This release enables AWS Compute Optimizer to analyze Amazon Aurora database clusters and generate Aurora I/O-Optimized recommendations." + }, + { + "type": "feature", + "category": "AWS EntityResolution", + "contributor": "", + "description": "Add support for generating match IDs in near real-time." + }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "Introduces SUSPENDING and SUSPENDED states for clusters, compute node groups, and queues." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Improve the endpoint rules performance by directly passing the needed params instead of using a POJO to keep track of them." + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock", + "contributor": "", + "description": "This release adds the Agent Lifecycle Paused State feature to Amazon Bedrock agents. By using an agent's alias, you can temporarily suspend agent operations during maintenance, updates, or other situations." + }, + { + "type": "feature", + "category": "Amazon Athena", + "contributor": "", + "description": "Add support for the managed query result in the workgroup APIs. The managed query result configuration enables users to store query results to Athena owned storage." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Updates Amazon ECS documentation to include note for upcoming default log driver mode change." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Add support for filtering ListInsights API calls on MISCONFIGURATION insight category" + }, + { + "type": "feature", + "category": "Cost Optimization Hub", + "contributor": "", + "description": "Support recommendations for Aurora instance and Aurora cluster storage." + }, + { + "type": "feature", + "category": "Synthetics", + "contributor": "", + "description": "Support for Java runtime handler pattern." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.56.json b/.changes/2.31.56.json new file mode 100644 index 000000000000..82589878b17e --- /dev/null +++ b/.changes/2.31.56.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.56", + "date": "2025-06-03", + "entries": [ + { + "type": "bugfix", + "category": "AWS S3 Event Notifications", + "contributor": "reifiedbeans", + "description": "Fixed parsing of S3 event notifications to allow eventTime to be null when eventName is not" + }, + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix NPE in `ProfileFileSupplier.defaultSupplier` when both credentials and config files do not exist." + }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Adds support to set the routing mode for a custom domain name." + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Adds support to create routing rules and set the routing mode for a custom domain name." + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "AWS EMR Serverless: Adds a new option in the CancelJobRun API in EMR 7.9.0+, to cancel a job with grace period. 
This feature is enabled by default with a 120-second grace period for streaming jobs and is not enabled by default for batch jobs." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.57.json b/.changes/2.31.57.json new file mode 100644 index 000000000000..bb89037b8928 --- /dev/null +++ b/.changes/2.31.57.json @@ -0,0 +1,72 @@ +{ + "version": "2.31.57", + "date": "2025-06-04", + "entries": [ + { + "type": "bugfix", + "category": "S3 Transfer Manager", + "contributor": "jencymaryjoseph", + "description": "DownloadFilter type incompatibility methods overridden from extended interface" + }, + { + "type": "feature", + "category": "AWS Amplify", + "contributor": "", + "description": "Update documentation for cacheConfig in CreateApp API" + }, + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release includes support for embedding and signing C2PA content credentials in MP4 outputs." + }, + { + "type": "feature", + "category": "AWS Invoicing", + "contributor": "", + "description": "Added new Invoicing ListInvoiceSummaries API Operation" + }, + { + "type": "feature", + "category": "AWS MediaConnect", + "contributor": "", + "description": "This release updates the DescribeFlow API to show peer IP addresses. You can now identify the peer IP addresses of devices connected to your sources and outputs. This helps you to verify and troubleshoot your flow's active connections." + }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now monitor flow and alert log metrics from the Network Firewall console." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic VMware Service", + "contributor": "", + "description": "Amazon Elastic VMware Service (Amazon EVS) allows you to run VMware Cloud Foundation (VCF) directly within your Amazon VPC including simplified self-managed migration experience with guided workflow in AWS console or via AWS CLI, get full access to their VCF deployment and VCF license portability." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Added support for p6-b200 instance type in SageMaker Training Jobs and Training Plans." + }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "contributor": "", + "description": "AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP" + }, + { + "type": "feature", + "category": "Amazon Transcribe Streaming Service", + "contributor": "", + "description": "AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.58.json b/.changes/2.31.58.json new file mode 100644 index 000000000000..cd2c06fabaf6 --- /dev/null +++ b/.changes/2.31.58.json @@ -0,0 +1,48 @@ +{ + "version": "2.31.58", + "date": "2025-06-05", + "entries": [ + { + "type": "bugfix", + "category": "Amazon DynamoDB Enhanced Client", + "contributor": "", + "description": "Fixed DynamoDbEnhancedClient DefaultDynamoDbAsyncTable::createTable() to create secondary indices that are defined on annotations of the POJO class, similar to DefaultDynamoDbTable::createTable()." 
+ }, + { + "type": "feature", + "category": "AWS Billing and Cost Management Pricing Calculator", + "contributor": "", + "description": "Updating the minimum for List APIs to be 1 (instead of 0)" + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Add new warning type 'EXCLUDED_PROPERTIES'" + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "AWS KMS announces the support for on-demand rotation of symmetric-encryption KMS keys with imported key material (EXTERNAL origin)." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Added ability to configure preferred authentication schemes when multiple auth options are available." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "AWS WAF adds support for ASN-based traffic filtering and support for ASN-based rate limiting." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.59.json b/.changes/2.31.59.json new file mode 100644 index 000000000000..8f3cfd0c9ea6 --- /dev/null +++ b/.changes/2.31.59.json @@ -0,0 +1,60 @@ +{ + "version": "2.31.59", + "date": "2025-06-06", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Fix expiration in past warning during profile credential loading." 
+ }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "Remove unpopulated KeyMaterialId from Encrypt Response" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for protocols field in service model" + }, + { + "type": "feature", + "category": "Agents for Amazon Bedrock Runtime", + "contributor": "", + "description": "This release introduces the `PromptCreationConfigurations` input parameter, which includes fields to control prompt population for `InvokeAgent` or `InvokeInlineAgent` requests." + }, + { + "type": "feature", + "category": "Amazon Rekognition", + "contributor": "", + "description": "Adds support for defining an ordered preference list of different Rekognition Face Liveness challenge types when calling CreateFaceLivenessSession." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Include Global Cluster Identifier in DBCluster if the DBCluster is a Global Cluster Member." + }, + { + "type": "feature", + "category": "Amazon Route 53", + "contributor": "", + "description": "Amazon Route 53 now supports the Asia Pacific (Taipei) Region (ap-east-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region." + }, + { + "type": "feature", + "category": "Amazon S3 Tables", + "contributor": "", + "description": "S3 Tables now supports getting details about a table via its table ARN." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.60.json b/.changes/2.31.60.json new file mode 100644 index 000000000000..c8beab43a60d --- /dev/null +++ b/.changes/2.31.60.json @@ -0,0 +1,66 @@ +{ + "version": "2.31.60", + "date": "2025-06-09", + "entries": [ + { + "type": "feature", + "category": "AWS AppSync", + "contributor": "", + "description": "Deprecate `atRestEncryptionEnabled` and `transitEncryptionEnabled` attributes in `CreateApiCache` action. Encryption is always enabled for new caches." + }, + { + "type": "feature", + "category": "AWS Cost Explorer Service", + "contributor": "", + "description": "Support dual-stack endpoints for ce api" + }, + { + "type": "feature", + "category": "AWS Marketplace Catalog Service", + "contributor": "", + "description": "The ListEntities API now supports the EntityID, LastModifiedDate, ProductTitle, and Visibility filters for machine learning products. You can also sort using all of those filters." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Adds support for configuring bearer auth using a token sourced from the environment for services with the `enableEnvironmentBearerToken` customization flag." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated Region class generation to use Partitions.json instead of the Endpoints.json and removed the hardcoded global regions." + }, + { + "type": "feature", + "category": "Amazon Connect Customer Profiles", + "contributor": "", + "description": "This release introduces capability of Profile Explorer, using correct ingestion timestamp & using historical data for computing calculated attributes, and new standard objects for T&H as part of Amazon Connect Customer Profiles service." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types." + }, + { + "type": "feature", + "category": "Amazon Elastic File System", + "contributor": "", + "description": "Added support for Internet Protocol Version 6 (IPv6) on EFS Service APIs and mount targets." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Thin Client", + "contributor": "", + "description": "Add ConflictException to UpdateEnvironment API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.61.json b/.changes/2.31.61.json new file mode 100644 index 000000000000..108562eba48d --- /dev/null +++ b/.changes/2.31.61.json @@ -0,0 +1,18 @@ +{ + "version": "2.31.61", + "date": "2025-06-10", + "entries": [ + { + "type": "feature", + "category": "Amazon GameLift Streams", + "contributor": "", + "description": "Documentation updates for Amazon GameLift Streams to address formatting errors, correct resource ID examples, and update links to other guides" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.62.json b/.changes/2.31.62.json new file mode 100644 index 000000000000..cbcc401665a5 --- /dev/null +++ b/.changes/2.31.62.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.62", + "date": "2025-06-11", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "jencymaryjoseph", + "description": "Deprecated DefaultCredentialsProvider.create() since it creates Singleton instance" + }, + { + "type": "feature", + "category": "AWS Control Catalog", + "contributor": "", + "description": "Introduced ListControlMappings API that retrieves control mappings. Added control aliases and governed resources fields in GetControl and ListControls APIs. New filtering capability in ListControls API, with implementation identifiers and implementation types." + }, + { + "type": "feature", + "category": "AWS Network Manager", + "contributor": "", + "description": "Add support for public DNS hostname resolution to private IP addresses across Cloud WAN-managed VPCs. Add support for security group referencing across Cloud WAN-managed VPCs." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "WAF now provides two DDoS protection options: resource-level monitoring for Application Load Balancers and the AWSManagedRulesAntiDDoSRuleSet managed rule group for CloudFront distributions." + }, + { + "type": "feature", + "category": "Amazon Elastic Kubernetes Service", + "contributor": "", + "description": "Release for EKS Pod Identity Cross Account feature and disableSessionTags flag." 
+ }, + { + "type": "feature", + "category": "Amazon Lex Model Building V2", + "contributor": "", + "description": "Add support for the Assisted NLU feature to improve bot performance" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Updates Amazon RDS documentation for Amazon RDS for Db2 cross-Region replicas in standby mode." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.63.json b/.changes/2.31.63.json new file mode 100644 index 000000000000..89c3626a137a --- /dev/null +++ b/.changes/2.31.63.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.63", + "date": "2025-06-12", + "entries": [ + { + "type": "feature", + "category": "AWS IoT FleetWise", + "contributor": "", + "description": "Add new status READY_FOR_CHECKIN used for vehicle synchronisation" + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "AWS KMS announces the support of ML-DSA key pairs that creates post-quantum safe digital signatures." + }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "Fixed regex patterns for ARN fields." 
+ }, + { + "type": "feature", + "category": "Amazon API Gateway", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway" + }, + { + "type": "feature", + "category": "AmazonApiGatewayV2", + "contributor": "", + "description": "Documentation updates for Amazon API Gateway" + }, + { + "type": "feature", + "category": "AmazonConnectCampaignServiceV2", + "contributor": "", + "description": "Added PutInstanceCommunicationLimits and GetInstanceCommunicationLimits APIs" + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "This Amazon ECS release supports updating the capacityProviderStrategy parameter in update-service." + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds support for retrieval of the optional executionIamPolicy field in the GetJobRun API response." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.64.json b/.changes/2.31.64.json new file mode 100644 index 000000000000..5413641bccd0 --- /dev/null +++ b/.changes/2.31.64.json @@ -0,0 +1,36 @@ +{ + "version": "2.31.64", + "date": "2025-06-16", + "entries": [ + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "You can now create firewalls using a Transit Gateway instead of a VPC, resulting in a TGW attachment." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add tracking of RequestBody/ResponseTransfromer implementations used in UserAgent." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a Amazon SageMaker trained Amazon Nova model into Amazon Bedrock for inference." 
+ }, + { + "type": "feature", + "category": "Amazon Elastic Container Registry", + "contributor": "", + "description": "The `DescribeImageScanning` API now includes `lastInUseAt` and `InUseCount` fields that can be used to prioritize vulnerability remediation for images that are actively being used." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release 1) adds a new S3DataType Converse for SageMaker training 2)adds C8g R7gd M8g C6in P6 P6e instance type for SageMaker endpoint 3) adds m7i, r7i, c7i instance type for SageMaker Training and Processing." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.65.json b/.changes/2.31.65.json new file mode 100644 index 000000000000..af3f7bda357c --- /dev/null +++ b/.changes/2.31.65.json @@ -0,0 +1,90 @@ +{ + "version": "2.31.65", + "date": "2025-06-17", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java V2", + "contributor": "", + "description": "Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache" + }, + { + "type": "feature", + "category": "AWS Backup", + "contributor": "", + "description": "AWS Backup is adding support for integration of its logically air-gapped vaults with the AWS Organizations Multi-party approval capability." + }, + { + "type": "feature", + "category": "AWS Certificate Manager", + "contributor": "", + "description": "Adds support for Exportable Public Certificates" + }, + { + "type": "feature", + "category": "AWS Database Migration Service", + "contributor": "", + "description": "Add \"Virtual\" field to Data Provider as well as \"S3Path\" and \"S3AccessRoleArn\" fields to DataProvider settings" + }, + { + "type": "feature", + "category": "AWS Multi-party Approval", + "contributor": "", + "description": "This release enables customers to create Multi-party approval teams and approval requests to protect supported operations." 
+ }, + { + "type": "feature", + "category": "AWS Network Firewall", + "contributor": "", + "description": "Release of Active Threat Defense in Network Firewall" + }, + { + "type": "feature", + "category": "AWS Organizations", + "contributor": "", + "description": "Add support for policy operations on the SECURITYHUB_POLICY policy type." + }, + { + "type": "feature", + "category": "AWS SecurityHub", + "contributor": "", + "description": "Adds operations, structures, and exceptions required for public preview release of Security Hub V2." + }, + { + "type": "feature", + "category": "AWS Security Token Service", + "contributor": "", + "description": "The AWS Security Token Service APIs AssumeRoleWithSAML and AssumeRoleWithWebIdentity can now be invoked without pre-configured AWS credentials in the SDK configuration." + }, + { + "type": "feature", + "category": "AWS WAFV2", + "contributor": "", + "description": "AWS WAF can now suggest protection packs for you based on the application information you provide when you create a webACL." + }, + { + "type": "feature", + "category": "Access Analyzer", + "contributor": "", + "description": "We are launching a new analyzer type, internal access analyzer. The new analyzer will generate internal access findings, which help customers understand who within their AWS organization or AWS Account has access to their critical AWS resources." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a trained model into Amazon Bedrock for inference." + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Adding support for extended threat detection for EKS Audit Logs and EKS Runtime Monitoring." 
+ }, + { + "type": "feature", + "category": "Inspector2", + "contributor": "", + "description": "Add Code Repository Scanning as part of AWS InspectorV2" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.66.json b/.changes/2.31.66.json new file mode 100644 index 000000000000..310ca92c2a4b --- /dev/null +++ b/.changes/2.31.66.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.66", + "date": "2025-06-18", + "entries": [ + { + "type": "feature", + "category": "AWS AI Ops", + "contributor": "", + "description": "This is the initial SDK release for Amazon AI Operations (AIOps). AIOps is a generative AI-powered assistant that helps you respond to incidents in your system by scanning your system's telemetry and quickly surface suggestions that might be related to your issue." + }, + { + "type": "feature", + "category": "AWS S3", + "contributor": "tmccombs", + "description": "Adds the ability to presign HeadObject and HeadBucket requests with the S3 Presigner" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Adding a new method of constructing ARNs without exceptions as control flow" + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "Added CloudWatch Logs Transformer support for converting CloudTrail, VPC Flow, EKS Audit, AWS WAF and Route53 Resolver logs to OCSF v1.1 format." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Add support for p6-b200 instance type for SageMaker Hyperpod" + }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Added support for renaming objects within the same bucket using the new RenameObject API." 
+ }, + { + "type": "feature", + "category": "Auto Scaling", + "contributor": "", + "description": "Add IncludeInstances parameter to DescribeAutoScalingGroups API" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.67.json b/.changes/2.31.67.json new file mode 100644 index 000000000000..4e1ccc9fcf25 --- /dev/null +++ b/.changes/2.31.67.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.67", + "date": "2025-06-19", + "entries": [ + { + "type": "feature", + "category": "AWS Lambda", + "contributor": "", + "description": "Support Schema Registry feature for Kafka Event Source Mapping. Customers can now configure a Schema Registry to enable schema validation and filtering for Avro, Protobuf, and JSON-formatted events in Lambda for Kafka Event Source." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a trained model into Amazon Bedrock for inference." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "This release introduces alternative support for utilizing CFN templates from S3 for SageMaker Projects." + }, + { + "type": "feature", + "category": "EMR Serverless", + "contributor": "", + "description": "This release adds the capability to enable IAM IdentityCenter Trusted Identity Propagation for users running Interactive Sessions on EMR Serverless Applications." 
+ }, + { + "type": "feature", + "category": "Payment Cryptography Control Plane", + "contributor": "", + "description": "Additional support for managing HMAC keys that adheres to changes documented in X9.143-2021 and provides better interoperability for key import/export" + }, + { + "type": "feature", + "category": "Payment Cryptography Data Plane", + "contributor": "", + "description": "Additional support for managing HMAC keys that adheres to changes documented in X9.143-2021 and provides better interoperability for key import/export" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.68.json b/.changes/2.31.68.json new file mode 100644 index 000000000000..a25a216df165 --- /dev/null +++ b/.changes/2.31.68.json @@ -0,0 +1,42 @@ +{ + "version": "2.31.68", + "date": "2025-06-20", + "entries": [ + { + "type": "feature", + "category": "AWS Elemental MediaConvert", + "contributor": "", + "description": "This release adds a new SPECIFIED_OPTIMAL option for handling DDS when using DVB-Sub with high resolution video." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue Data Quality now provides aggregated metrics in evaluation results when publishAggregatedMetrics with row-level results are enabled. These metrics include summary statistics showing total counts of processed, passed, and failed rows and rules in a single view." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "Add support for tiers in Content Filters and Denied Topics for Amazon Bedrock Guardrails." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Add ECS support for Windows Server 2025" + }, + { + "type": "feature", + "category": "Amazon Location Service Places V2", + "contributor": "", + "description": "Geocode, ReverseGeocode, and GetPlace APIs added Intersections and SecondaryAddresses. 
To use, add to the AdditionalFeatures list in your request. This provides info about nearby intersections and secondary addresses that are associated with a main address. Also added MainAddress and ParsedQuery." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.69.json b/.changes/2.31.69.json new file mode 100644 index 000000000000..563cd23f9453 --- /dev/null +++ b/.changes/2.31.69.json @@ -0,0 +1,30 @@ +{ + "version": "2.31.69", + "date": "2025-06-23", + "entries": [ + { + "type": "bugfix", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Ignore unknown properties on endpoints in endpoint rules." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue now supports sort and z-order strategy for managed automated compaction for Iceberg tables in addition to binpack." + }, + { + "type": "feature", + "category": "Amazon S3 Tables", + "contributor": "", + "description": "S3 Tables now supports sort and z-order compaction strategies for Iceberg tables in addition to binpack." + }, + { + "type": "feature", + "category": "Amazon Workspaces Instances", + "contributor": "", + "description": "Added support for Amazon WorkSpaces Instances API" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.70.json b/.changes/2.31.70.json new file mode 100644 index 000000000000..817b8aedf076 --- /dev/null +++ b/.changes/2.31.70.json @@ -0,0 +1,66 @@ +{ + "version": "2.31.70", + "date": "2025-06-24", + "entries": [ + { + "type": "feature", + "category": "AWS AI Ops", + "contributor": "", + "description": "Adds support for cross account investigations for CloudWatch investigations AI Operations (AIOps)." 
+ }, + { + "type": "feature", + "category": "AWS Batch", + "contributor": "", + "description": "Add userdataType to LaunchTemplateSpecification and LaunchTemplateSpecificationOverride." + }, + { + "type": "feature", + "category": "AWS License Manager", + "contributor": "", + "description": "AWS License Manager now supports license type conversions for AWS Marketplace products. Customers can provide Marketplace codes in the source license context or destination license context in the CreateLicenseConversionTaskForResource requests." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "We are making ListFoundationModelAgreementOffers, DeleteFoundationModelAgreement, CreateFoundationModelAgreement, GetFoundationModelAvailability, PutUseCaseForModelAccess and GetUseCaseForModelAccess APIs public, previously they were console." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release allows you to create and register AMIs while maintaining their underlying EBS snapshots within Local Zones." + }, + { + "type": "feature", + "category": "Amazon GameLift", + "contributor": "", + "description": "Add support for UDP ping beacons to ListLocations API, including new PingBeacon and UDPEndpoint data types within its Locations return value. Use UDP ping beacon endpoints to help measure real-time network latency for multiplayer games." + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Adding support for RDS on Dedicated Local Zones, including local backup target, snapshot availability zone and snapshot target" + }, + { + "type": "feature", + "category": "Amazon Route 53 Resolver", + "contributor": "", + "description": "Add support for iterative DNS queries through the new INBOUND_DELEGATION endpoint. Add delegation support through the Outbound Endpoints with DELEGATE rules." 
+ }, + { + "type": "feature", + "category": "Amazon Transcribe Service", + "contributor": "", + "description": "This Feature Adds Support for the \"et-EE\" Locale for Batch Operations" + }, + { + "type": "feature", + "category": "Elastic Load Balancing", + "contributor": "", + "description": "Add Paginator for DescribeAccountLimits, and fix Paginators for DescribeTrustStoreAssociations, DescribeTrustStoreRevocations, and DescribeTrustStores" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.71.json b/.changes/2.31.71.json new file mode 100644 index 000000000000..edbc2cad5b36 --- /dev/null +++ b/.changes/2.31.71.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.71", + "date": "2025-06-25", + "entries": [ + { + "type": "feature", + "category": "AWS S3 Control", + "contributor": "", + "description": "Add support for the ability to use Amazon S3 Access Points with Amazon FSx for OpenZFS file systems." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Adding constant to each SDK module to represent its version" + }, + { + "type": "feature", + "category": "AWS Storage Gateway", + "contributor": "", + "description": "This release adds IPv6 support to the Storage Gateway APIs. APIs that previously only accept or return IPv4 address will now accept or return both IPv4 and IPv6 addresses." + }, + { + "type": "feature", + "category": "Amazon EC2 Container Service", + "contributor": "", + "description": "Updates for change to Amazon ECS default log driver mode from blocking to non-blocking" + }, + { + "type": "feature", + "category": "Amazon FSx", + "contributor": "", + "description": "Add support for the ability to create Amazon S3 Access Points for Amazon FSx for OpenZFS file systems." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Storage Service", + "contributor": "", + "description": "Adds support for additional server-side encryption mode and storage class values for accessing Amazon FSx data from Amazon S3 using S3 Access Points" + }, + { + "type": "feature", + "category": "Amazon Textract", + "contributor": "", + "description": "Add RotationAngle field to Geometry of WORD blocks for Textract AnalyzeDocument API" + }, + { + "type": "feature", + "category": "Amazon WorkSpaces Thin Client", + "contributor": "", + "description": "Remove Tags field from Get API responses" + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.72.json b/.changes/2.31.72.json new file mode 100644 index 000000000000..d493417e807f --- /dev/null +++ b/.changes/2.31.72.json @@ -0,0 +1,90 @@ +{ + "version": "2.31.72", + "date": "2025-06-26", + "entries": [ + { + "type": "bugfix", + "category": "EmfMetricLoggingPublisher", + "contributor": "", + "description": "Fixed the bug that EmfMetricLoggingPublisher not properly publishing Long type metrics" + }, + { + "type": "feature", + "category": "AWSDeadlineCloud", + "contributor": "", + "description": "Added fields to track cumulative task retry attempts for steps and jobs" + }, + { + "type": "feature", + "category": "AWS Key Management Service", + "contributor": "", + "description": "This release updates AWS CLI examples for KMS APIs." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add code generation validation for missing request URI on an operation." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for defining service model validators and generating valdiation reports during code generation." 
+ }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add support for validating that shared models between two services are identical." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "This release adds support for OdbNetworkArn as a target in VPC Route Tables" + }, + { + "type": "feature", + "category": "Amazon Keyspaces", + "contributor": "", + "description": "This release provides change data capture (CDC) streams support through updates to the Amazon Keyspaces API." + }, + { + "type": "feature", + "category": "Amazon Keyspaces Streams", + "contributor": "", + "description": "This release adds change data capture (CDC) streams support through the new Amazon Keyspaces Streams API." + }, + { + "type": "feature", + "category": "Amazon WorkSpaces", + "contributor": "", + "description": "Updated modifyStreamingProperties to support PrivateLink VPC endpoints for directories" + }, + { + "type": "feature", + "category": "Code Generator Maven Plugin", + "contributor": "", + "description": "Update the generator plugin to support model validation during code generation. In addition, this adds the `writeValidationReport` flag to support writing the validation report to disk." + }, + { + "type": "feature", + "category": "Managed integrations for AWS IoT Device Management", + "contributor": "", + "description": "Adding managed integrations APIs for IoT Device Management to onboard and control devices across different manufacturers, connectivity protocols and third party vendor clouds. APIs include managed thing operations, provisioning profile management, and cloud connector operations." 
+ }, + { + "type": "feature", + "category": "QBusiness", + "contributor": "", + "description": "Added support for App level authentication for QBusiness DataAccessor using AWS IAM Identity center Trusted Token issuer" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.73.json b/.changes/2.31.73.json new file mode 100644 index 000000000000..a102a07f196c --- /dev/null +++ b/.changes/2.31.73.json @@ -0,0 +1,54 @@ +{ + "version": "2.31.73", + "date": "2025-06-27", + "entries": [ + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Added important considerations to the PutConformancePack and PutOrganizationConformancPack APIs." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "AWS Glue now supports schema, partition and sort management of Apache Iceberg tables using Glue SDK" + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release adds the following value to an InitiateAs enum: COMPLETED" + }, + { + "type": "feature", + "category": "Amazon GuardDuty", + "contributor": "", + "description": "Update JSON target for Kubernetes workload resource type." + }, + { + "type": "feature", + "category": "Amazon Q Connect", + "contributor": "", + "description": "Adding UnauthorizedException to public SDK" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "StartDBCluster and StopDBCluster can now throw InvalidDBShardGroupStateFault." 
+ }, + { + "type": "feature", + "category": "Amazon Simple Email Service", + "contributor": "", + "description": "Added support for new SES regions" + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/2.31.74.json b/.changes/2.31.74.json new file mode 100644 index 000000000000..0da6a07188d4 --- /dev/null +++ b/.changes/2.31.74.json @@ -0,0 +1,132 @@ +{ + "version": "2.31.74", + "date": "2025-06-30", + "entries": [ + { + "type": "feature", + "category": "AWS ARC - Zonal Shift", + "contributor": "", + "description": "Added support for on-demand practice runs and balanced capacity checks in ARC autoshift practice." + }, + { + "type": "feature", + "category": "AWS B2B Data Interchange", + "contributor": "", + "description": "Updated APIs to support inbound EDI split capabilities and additional Partnership-level configurations of generated EDI files' contents and format." + }, + { + "type": "feature", + "category": "AWS CloudFormation", + "contributor": "", + "description": "Added support for UNKNOWN drift status." + }, + { + "type": "feature", + "category": "AWS Config", + "contributor": "", + "description": "Updated ResourceType enum with new resource types onboarded by AWS Config as of June 2025" + }, + { + "type": "feature", + "category": "AWS Data Exchange", + "contributor": "", + "description": "This release updates resource Id with alphanumeric constraint, including Asset id, Revision id, Data Set id, Job id, and Event Action id." + }, + { + "type": "feature", + "category": "AWS Glue", + "contributor": "", + "description": "releasing source processing properties to support source properties for ODB integrations" + }, + { + "type": "feature", + "category": "AWS Health Imaging", + "contributor": "", + "description": "Added new fields to support the concept of primary image sets within the storage hierarchy." 
+ }, + { + "type": "feature", + "category": "AWS Identity and Access Management", + "contributor": "", + "description": "Updated IAM ServiceSpecificCredential support to include expiration, API Key output format instead of username and password for services that will support API keys, and the ability to list credentials for all users in the account for a given service configuration." + }, + { + "type": "feature", + "category": "AWS Outposts", + "contributor": "", + "description": "Make ContactName and ContactPhoneNumber required fields when creating and updating Outpost Site Addresses." + }, + { + "type": "feature", + "category": "AWS Parallel Computing Service", + "contributor": "", + "description": "Fixed the validation pattern for an instance profile Amazon Resource Name (ARN) in AWS PCS." + }, + { + "type": "feature", + "category": "AWS Transfer Family", + "contributor": "", + "description": "Added support for dual-stack (IPv4 and IPv6) endpoints for SFTP public endpoints and VPC-internal endpoints (SFTP, FTPS, FTP, and AS2), enabling customers to configure new servers with IPv4 or dual-stack mode, convert existing servers to dual-stack, and use IPv6 with service APIs." + }, + { + "type": "feature", + "category": "Amazon Bedrock", + "contributor": "", + "description": "Add support for API Keys, Re-Ranker, implicit filter for RAG / KB evaluation for Bedrock APIs." + }, + { + "type": "feature", + "category": "Amazon Bedrock Runtime", + "contributor": "", + "description": "Add API Key and document citations support for Bedrock Runtime APIs" + }, + { + "type": "feature", + "category": "Amazon CloudWatch Logs", + "contributor": "", + "description": "Increase minimum length of queryId parameter to 1 character." + }, + { + "type": "feature", + "category": "Amazon Connect Service", + "contributor": "", + "description": "This release introduces ChatMetrics to the model, providing comprehensive analytics insights for Amazon Connect chat conversations. 
Users can access these detailed metrics through the AWS Connect API by using the DescribeContact operation with their specific instance and contact IDs" + }, + { + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "This change adds support for witnesses in global tables. It also adds a new table status, REPLICATION_NOT_AUTHORIZED. This status will indicate scenarios where global replicas table can't be utilized for data plane operations." + }, + { + "type": "feature", + "category": "Amazon EventBridge", + "contributor": "", + "description": "Added support for EventBridge Dualstack endpoints in AWS GovCloud (US) regions (us-gov-east-1 and us-gov-west-1). The dualstack endpoints are identical for both FIPS and non-FIPS configurations, following the format: events.{region}.api.aws" + }, + { + "type": "feature", + "category": "Amazon QuickSight", + "contributor": "", + "description": "Introduced custom permission capabilities for reporting content. Added menu option in exploration to preserve configuration data when textbox menu option is used. Added support for Athena trusted identity propagation." + }, + { + "type": "feature", + "category": "Amazon Simple Systems Manager (SSM)", + "contributor": "", + "description": "Introduces AccessType, a new filter value for the DescribeSessions API." + }, + { + "type": "feature", + "category": "Network Flow Monitor", + "contributor": "", + "description": "Add ConflictExceptions to UpdateScope and DeleteScope operations for scopes being mutated." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." 
+ } + ] +} \ No newline at end of file diff --git a/.changes/2.31.75.json b/.changes/2.31.75.json new file mode 100644 index 000000000000..1f01be61b2ed --- /dev/null +++ b/.changes/2.31.75.json @@ -0,0 +1,60 @@ +{ + "version": "2.31.75", + "date": "2025-07-01", + "entries": [ + { + "type": "feature", + "category": "AWS Clean Rooms ML", + "contributor": "", + "description": "This release introduces support for incremental training and distributed training for custom models in AWS Clean Rooms ML." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Add the sdk service client version to the useragent" + }, + { + "type": "feature", + "category": "Amazon DataZone", + "contributor": "", + "description": "Add support for the new optional domain-unit-id parameter in the UpdateProject API." + }, + { + "type": "feature", + "category": "Amazon Elastic Compute Cloud", + "contributor": "", + "description": "Add Context to GetInstanceTypesFromInstanceRequirements API" + }, + { + "type": "feature", + "category": "Amazon Relational Database Service", + "contributor": "", + "description": "Amazon RDS Custom for Oracle now supports multi-AZ database instances." + }, + { + "type": "feature", + "category": "Amazon SageMaker Service", + "contributor": "", + "description": "Updated field validation requirements for InstanceGroups." + }, + { + "type": "feature", + "category": "QBusiness", + "contributor": "", + "description": "New ChatResponseConfiguration to Customize Q Business chat responses for specific use cases and communication needs. Updated Boosting capability allowing admins to provide preference on date attributes for recency and/or provide a preferred data source." + }, + { + "type": "feature", + "category": "odb", + "contributor": "", + "description": "This release adds API operations for Oracle Database@AWS. 
You can use the APIs to create Exadata infrastructure, ODB networks, and Exadata and Autonomous VM clusters inside AWS data centers. The infrastructure is managed by OCI. You can integrate these resources with AWS services." + }, + { + "type": "feature", + "category": "AWS SDK for Java v2", + "contributor": "", + "description": "Updated endpoint and partition metadata." + } + ] +} \ No newline at end of file diff --git a/.changes/next-release/feature-AWSSDKforJavav2-b405876.json b/.changes/next-release/feature-AWSSDKforJavav2-b405876.json new file mode 100644 index 000000000000..bb9d5276a1ac --- /dev/null +++ b/.changes/next-release/feature-AWSSDKforJavav2-b405876.json @@ -0,0 +1,6 @@ +{ + "type": "feature", + "category": "Amazon DynamoDB", + "contributor": "", + "description": "Enable caching calls to URI constructors for account-id based endpoints" +} diff --git a/.github/workflows/api-surface-area-review-verification.yml b/.github/workflows/api-surface-area-review-verification.yml new file mode 100644 index 000000000000..eef6b517dab8 --- /dev/null +++ b/.github/workflows/api-surface-area-review-verification.yml @@ -0,0 +1,40 @@ +name: API Surface Area Review Verification + +permissions: + contents: read + pull-requests: read + +on: + pull_request: + types: [ opened, synchronize, reopened, labeled, unlabeled ] + branches: + - master + +jobs: + api-surface-area-review-verification: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Verifies updates to protected/public APIs have been reviewed and approved by the team, if any + id: api-surface-area-review-verification + if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-api-surface-area-change') }} + run: | + git fetch origin ${{ github.base_ref }} --depth 1 + FILES=$( git diff remotes/origin/${{ github.base_ref }} --name-only | grep "\.java$" | grep -v -E "(^|/)(internal|test|codegen|v2-migration|it)/" || true) + if [ -n "$FILES" ]; then + echo "::error::Changes around 
protected/public APIs found:" + echo "$FILES" | while read file; do + echo "::error::$file" + done + echo "has_matches=true" >> $GITHUB_OUTPUT + else + echo "No changes around protected/public APIs found." + echo "has_matches=false" >> $GITHUB_OUTPUT + fi + - name: Fail if there are changes around protected/public APIs and there's no label + if: ${{ steps.api-surface-area-review-verification.outputs.has_matches == 'true' && !contains(github.event.pull_request.labels.*.name, 'api-surface-area-approved-by-team') }} + run: | + echo "::error ::Change around public/protected APIs has been detected. Please either:" + echo "::error ::* Review it with the team and add the 'api-surface-area-approved-by-team' label to this PR after approval –or–" + echo "::error ::* Add the 'no-api-surface-area-change' label to this PR in case this is a false positive" + exit 1 diff --git a/.github/workflows/changelog-verification.yml b/.github/workflows/changelog-verification.yml index ea2df21723c7..bc619e590851 100644 --- a/.github/workflows/changelog-verification.yml +++ b/.github/workflows/changelog-verification.yml @@ -1,5 +1,9 @@ name: Changelog verification +permissions: + contents: read + pull-requests: read + on: pull_request: types: [ opened, synchronize, reopened, labeled, unlabeled ] @@ -12,14 +16,30 @@ jobs: steps: - uses: actions/checkout@v4 - name: Check for changelog entry - if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-changelog') }} + if: ${{ !contains(github.event.pull_request.labels.*.name, 'changelog-not-required') }} run: | - git fetch origin ${{ github.base_ref }} --depth 1 && \ - git diff remotes/origin/${{ github.base_ref }} --name-only | grep -P "\.changes/next-release/*[a-zA-Z0-9_-]+\.json" + git fetch origin ${{ github.base_ref }} --depth 1 + NON_TEST_FILES=$(git diff remotes/origin/${{ github.base_ref }} --name-only | grep "\.java$" | grep -v -E "(^|/)(test|it)/" || true) + if [ -n "$NON_TEST_FILES" ]; then + echo "::notice::Non-test Java changes found:" 
+ echo "$NON_TEST_FILES" | while read file; do + echo "::notice::$file" + done + echo "Checking for changelog entry..." + CHANGELOG_FILES=$(git diff remotes/origin/${{ github.base_ref }} --name-only | grep -P "\.changes/next-release/.*[a-zA-Z0-9_-]+\.json" || true) + if [ -z "$CHANGELOG_FILES" ]; then + echo "::error::No changelog entry found for Java changes" + exit 1 + else + echo "::notice::Changelog entry found: $CHANGELOG_FILES" + fi + else + echo "::notice::No non-test Java changes found. Changelog verification skipped." + fi - name: Error message if: ${{ failure() }} run: | - echo "::error ::No new/updated changelog entry found in /.changes/next-release directory. Please either:" - echo "::error ::* Add a changelog entry (see CONTRIBUTING.md for instructions) –or–" - echo "::error ::* Add the 'no-changelog' label to this PR (in rare cases not warranting a changelog entry)" - exit 1 \ No newline at end of file + echo "::error::No new/updated changelog entry found in /.changes/next-release directory. 
Please either:" + echo "::error::* Add a changelog entry (see CONTRIBUTING.md for instructions) –or–" + echo "::error::* Add the 'changelog-not-required' label to this PR (in rare cases not warranting a changelog entry)" + exit 1 diff --git a/.github/workflows/codebuild-ci.yml b/.github/workflows/codebuild-ci.yml index c99c8eae844f..72b79d06b3ed 100644 --- a/.github/workflows/codebuild-ci.yml +++ b/.github/workflows/codebuild-ci.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -34,7 +34,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -64,7 +64,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -79,7 +79,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -93,7 +93,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: 
us-west-2 @@ -107,7 +107,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -130,7 +130,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -139,3 +139,134 @@ jobs: uses: aws-actions/aws-codebuild-run-build@v1 with: project-name: aws-sdk-java-v2-endpoints-test + brazil-json-validation: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Validate Brazil config + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-java-sdk-v2-validate-brazil-config + migration-tests: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run migration test + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-migration-test + + s3-regression-tests-download: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: DownloadStreamingRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for downloads + uses: 
aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST + s3-regression-tests-control-plane: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: ControlPlaneOperationRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for control plane + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST + s3-regression-tests-upload-sync: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: UploadSyncRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for uploads + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST + s3-regression-tests-upload-async: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: UploadAsyncRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for uploads + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST + s3-regression-tests-upload-crt: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: 
UploadCrtRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for uploads + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST + s3-regression-tests-upload-multi: + if: github.repository == 'aws/aws-sdk-java-v2' + runs-on: ubuntu-latest + env: + REGRESSION_TEST: UploadTransferManagerRegressionTesting + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} + aws-region: us-west-2 + role-duration-seconds: 7200 + - name: Run s3 regression tests for uploads + uses: aws-actions/aws-codebuild-run-build@v1 + with: + project-name: aws-sdk-java-v2-s3-regression-tests + env-vars-for-codebuild: REGRESSION_TEST diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000000..79405678f4ee --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,90 @@ +name: "CodeQL Analysis" + +on: + push: + branches: [ "master" ] + pull_request: + schedule: + - cron: '44 14 * * 6' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. 
+ runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: actions + build-mode: none + - language: java-kotlin + build-mode: none # This mode only analyzes Java. Set this to 'autobuild' or 'manual' to analyze Kotlin too. + - language: python + build-mode: none + # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. + # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Add any setup steps before running the `github/codeql-action/init` action. + # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). This is typically only required for manual builds. + # - name: Setup runtime (example) + # uses: actions/setup-example@v1 + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/lock-conversation-closed-pr.yml b/.github/workflows/lock-conversation-closed-pr.yml new file mode 100644 index 000000000000..0e75c1b07aa1 --- /dev/null +++ b/.github/workflows/lock-conversation-closed-pr.yml @@ -0,0 +1,35 @@ +name: Lock PR Conversation on Close + +on: + pull_request: + types: [closed] + +jobs: + lock-conversation-closed-prs: + if: github.repository == 'aws/aws-sdk-java-v2' + name: Lock PR Conversation on Close + 
runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Lock PR conversation on Close + uses: actions/github-script@v7 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + script: | + const prNumber = context.payload.pull_request.number; + + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: "This pull request has been closed and the conversation has been locked. Comments on closed PRs are hard for our team to see. If you need more assistance, please open a new issue that references this one." + }); + + await github.rest.issues.lock({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + lock_reason: "resolved" + }); \ No newline at end of file diff --git a/.github/workflows/merge-queue-metric.yml b/.github/workflows/merge-queue-metric.yml index f7c12aa12429..259f03e4d268 100644 --- a/.github/workflows/merge-queue-metric.yml +++ b/.github/workflows/merge-queue-metric.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 @@ -32,11 +32,11 @@ jobs: if: ${{ github.repository == 'aws/aws-sdk-java-v2' && github.event.action == 'dequeued' && github.event.reason != 'MERGE'}} steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.CI_AWS_ROLE_ARN }} aws-region: us-west-2 role-duration-seconds: 900 - name: Record merge queue removal run: | - aws --region us-west-2 cloudwatch put-metric-data --namespace AwsJavaSdkV2/GitHub --metric-name MergeQueue-Remove --unit Count --value 1 --dimensions Branch=master \ No newline at end of file + aws --region us-west-2 cloudwatch put-metric-data --namespace AwsJavaSdkV2/GitHub --metric-name 
MergeQueue-Remove --unit Count --value 1 --dimensions Branch=master diff --git a/.github/workflows/new-module-verification.yml b/.github/workflows/new-module-verification.yml new file mode 100644 index 000000000000..f04b620d200d --- /dev/null +++ b/.github/workflows/new-module-verification.yml @@ -0,0 +1,194 @@ +name: New Module Verification + +on: + pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] + branches: + - master + - feature/master/* + +permissions: + contents: read + +jobs: + new-module-verification: + name: Verify New Modules + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Check for new module additions + id: check-new-modules + shell: bash + run: | + set -euo pipefail + + echo "::group::Detecting new modules" + git fetch origin ${{ github.base_ref }} --depth 1 + + # Find new pom.xml files in the diff + NEW_POM_FILES=$(git diff --name-only remotes/origin/${{ github.base_ref }} | grep -E '.*pom\.xml$' | grep -v "target/" || echo "") + + if [ -z "$NEW_POM_FILES" ]; then + echo "No new modules detected." 
+ echo "new_modules_found=false" >> $GITHUB_OUTPUT + exit 0 + fi + + echo "Potential new modules detected:" + echo "$NEW_POM_FILES" + echo "new_modules_found=true" >> $GITHUB_OUTPUT + + # Save the list of new pom files for later steps + echo "$NEW_POM_FILES" > new_pom_files.txt + echo "::endgroup::" + + - name: Verify new modules + if: steps.check-new-modules.outputs.new_modules_found == 'true' + shell: bash + run: | + set -euo pipefail + + NEW_POM_FILES=$(cat new_pom_files.txt) + + # Initialize counters and error flag + TEST_MODULES=0 + NON_TEST_MODULES=0 + HAS_ERRORS=0 + + echo "::group::Analyzing new modules" + + for POM_FILE in $NEW_POM_FILES; do + MODULE_DIR=$(dirname "$POM_FILE") + MODULE_NAME=$(basename "$MODULE_DIR") + + # Check if this is a new module (not just an updated pom.xml) + if git show remotes/origin/${{ github.base_ref }}:"$POM_FILE" &>/dev/null; then + echo "Skipping $POM_FILE - file already exists in base branch" + continue + fi + + # Skip modules under services directory + if [[ "$MODULE_DIR" == services/* ]]; then + echo "Skipping $MODULE_DIR - modules under services/ are excluded from verification" + continue + fi + + echo "New module detected: $MODULE_DIR" + + # Check if it's a test module + if [[ "$MODULE_DIR" == *"/test/"* || "$MODULE_DIR" == *"/it/"* || "$MODULE_DIR" == *"-test"* || "$MODULE_DIR" == *"-tests"* ]]; then + echo "::group::Test module: $MODULE_DIR" + TEST_MODULES=$((TEST_MODULES + 1)) + + echo "Verifying test module requirements..." + + # 1. Check if excluded from maven deploy command + if ! grep -q "$MODULE_NAME" buildspecs/release-to-maven.yml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not excluded from maven deploy command in buildspecs/release-to-maven.yml" + HAS_ERRORS=1 + else + echo "✅ Module is excluded from maven deploy command" + fi + + # 2. Check if excluded from javadoc generation + if ! 
grep -q "$MODULE_NAME" buildspecs/release-javadoc.yml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not excluded from javadoc generation in buildspecs/release-javadoc.yml" + HAS_ERRORS=1 + else + echo "✅ Module is excluded from javadoc generation" + fi + + # 3. Check if Brazil import is skipped + if ! grep -q "\"$MODULE_NAME\".*\"skip\".*true" .brazil.json 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not configured to skip Brazil import in .brazil.json" + HAS_ERRORS=1 + else + echo "✅ Brazil import is skipped for this module" + fi + echo "::endgroup::" + + else + echo "::group::Non-test module: $MODULE_DIR" + NON_TEST_MODULES=$((NON_TEST_MODULES + 1)) + + echo "Verifying non-test module requirements..." + + # 1. Check for Automatic-Module-Name in pom.xml + if ! grep -q "Automatic-Module-Name" "$POM_FILE" 2>/dev/null; then + echo "::error::Automatic-Module-Name is not specified in $POM_FILE" + HAS_ERRORS=1 + else + echo "✅ Automatic-Module-Name is specified" + fi + + # 2. Check if added to tests-coverage-reporting pom.xml + if ! grep -q ".*$MODULE_NAME" test/tests-coverage-reporting/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to tests-coverage-reporting pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to tests-coverage-reporting" + fi + + # 3. Check if added to aws-sdk-java pom.xml + if ! grep -q ".*$MODULE_NAME" aws-sdk-java/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to aws-sdk-java pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to aws-sdk-java pom.xml" + fi + + # 4. Check if added to architecture-tests pom.xml + if ! grep -q ".*$MODULE_NAME" test/architecture-tests/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to architecture-tests pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to architecture-tests pom.xml" + fi + + # 5. Check if added to bom pom.xml + if ! 
grep -q "$MODULE_NAME" bom/pom.xml 2>/dev/null; then + echo "::error::Module $MODULE_NAME is not added to bom pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is added to bom pom.xml" + fi + + # 6. Check if japicmp plugin config is updated + JAPICMP_CHECK=$(grep -A 50 "japicmp-maven-plugin" pom.xml 2>/dev/null | grep -A 50 "" 2>/dev/null | grep -q "$MODULE_NAME" 2>/dev/null || echo "MISSING") + if [ "$JAPICMP_CHECK" = "MISSING" ]; then + echo "::error::Module $MODULE_NAME is not included in japicmp-maven-plugin includeModules section in pom.xml" + HAS_ERRORS=1 + else + echo "✅ Module is included in japicmp-maven-plugin configuration" + fi + + # 7. Check if package name mapping is added in .brazil.json + if ! grep -q "\"$MODULE_NAME\"" .brazil.json 2>/dev/null; then + echo "::error::Package name mapping for $MODULE_NAME is not added in .brazil.json" + HAS_ERRORS=1 + else + echo "✅ Package name mapping is added in .brazil.json" + fi + echo "::endgroup::" + fi + done + echo "::endgroup::" + + echo "::group::Verification summary" + echo "Verification complete." + echo "Test modules found: $TEST_MODULES" + echo "Non-test modules found: $NON_TEST_MODULES" + + if [ $HAS_ERRORS -eq 1 ]; then + echo "::error::Some verification checks failed. Please review the errors above and fix them." + exit 1 + else + echo "✅ All automated verification checks passed!" + fi + echo "::endgroup::" diff --git a/CHANGELOG.md b/CHANGELOG.md index ee86f1225f41..2f742efa378b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,1137 @@ #### 👋 _Looking for changelogs for older versions? You can find them in the [changelogs](./changelogs) directory._ +# __2.31.75__ __2025-07-01__ +## __AWS Clean Rooms ML__ + - ### Features + - This release introduces support for incremental training and distributed training for custom models in AWS Clean Rooms ML. + +## __AWS SDK for Java v2__ + - ### Features + - Add the sdk service client version to the useragent + - Updated endpoint and partition metadata. 
+ +## __Amazon DataZone__ + - ### Features + - Add support for the new optional domain-unit-id parameter in the UpdateProject API. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Add Context to GetInstanceTypesFromInstanceRequirements API + +## __Amazon Relational Database Service__ + - ### Features + - Amazon RDS Custom for Oracle now supports multi-AZ database instances. + +## __Amazon SageMaker Service__ + - ### Features + - Updated field validation requirements for InstanceGroups. + +## __QBusiness__ + - ### Features + - New ChatResponseConfiguration to Customize Q Business chat responses for specific use cases and communication needs. Updated Boosting capability allowing admins to provide preference on date attributes for recency and/or provide a preferred data source. + +## __odb__ + - ### Features + - This release adds API operations for Oracle Database@AWS. You can use the APIs to create Exadata infrastructure, ODB networks, and Exadata and Autonomous VM clusters inside AWS data centers. The infrastructure is managed by OCI. You can integrate these resources with AWS services. + +# __2.31.74__ __2025-06-30__ +## __AWS ARC - Zonal Shift__ + - ### Features + - Added support for on-demand practice runs and balanced capacity checks in ARC autoshift practice. + +## __AWS B2B Data Interchange__ + - ### Features + - Updated APIs to support inbound EDI split capabilities and additional Partnership-level configurations of generated EDI files' contents and format. + +## __AWS CloudFormation__ + - ### Features + - Added support for UNKNOWN drift status. + +## __AWS Config__ + - ### Features + - Updated ResourceType enum with new resource types onboarded by AWS Config as of June 2025 + +## __AWS Data Exchange__ + - ### Features + - This release updates resource Id with alphanumeric constraint, including Asset id, Revision id, Data Set id, Job id, and Event Action id. 
+ +## __AWS Glue__ + - ### Features + - releasing source processing properties to support source properties for ODB integrations + +## __AWS Health Imaging__ + - ### Features + - Added new fields to support the concept of primary image sets within the storage hierarchy. + +## __AWS Identity and Access Management__ + - ### Features + - Updated IAM ServiceSpecificCredential support to include expiration, API Key output format instead of username and password for services that will support API keys, and the ability to list credentials for all users in the account for a given service configuration. + +## __AWS Outposts__ + - ### Features + - Make ContactName and ContactPhoneNumber required fields when creating and updating Outpost Site Addresses. + +## __AWS Parallel Computing Service__ + - ### Features + - Fixed the validation pattern for an instance profile Amazon Resource Name (ARN) in AWS PCS. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __AWS Transfer Family__ + - ### Features + - Added support for dual-stack (IPv4 and IPv6) endpoints for SFTP public endpoints and VPC-internal endpoints (SFTP, FTPS, FTP, and AS2), enabling customers to configure new servers with IPv4 or dual-stack mode, convert existing servers to dual-stack, and use IPv6 with service APIs. + +## __Amazon Bedrock__ + - ### Features + - Add support for API Keys, Re-Ranker, implicit filter for RAG / KB evaluation for Bedrock APIs. + +## __Amazon Bedrock Runtime__ + - ### Features + - Add API Key and document citations support for Bedrock Runtime APIs + +## __Amazon CloudWatch Logs__ + - ### Features + - Increase minimum length of queryId parameter to 1 character. + +## __Amazon Connect Service__ + - ### Features + - This release introduces ChatMetrics to the model, providing comprehensive analytics insights for Amazon Connect chat conversations. 
Users can access these detailed metrics through the AWS Connect API by using the DescribeContact operation with their specific instance and contact IDs + +## __Amazon DynamoDB__ + - ### Features + - This change adds support for witnesses in global tables. It also adds a new table status, REPLICATION_NOT_AUTHORIZED. This status will indicate scenarios where global replicas table can't be utilized for data plane operations. + +## __Amazon EventBridge__ + - ### Features + - Added support for EventBridge Dualstack endpoints in AWS GovCloud (US) regions (us-gov-east-1 and us-gov-west-1). The dualstack endpoints are identical for both FIPS and non-FIPS configurations, following the format: events.{region}.api.aws + +## __Amazon QuickSight__ + - ### Features + - Introduced custom permission capabilities for reporting content. Added menu option in exploration to preserve configuration data when textbox menu option is used. Added support for Athena trusted identity propagation. + +## __Amazon Simple Systems Manager (SSM)__ + - ### Features + - Introduces AccessType, a new filter value for the DescribeSessions API. + +## __Network Flow Monitor__ + - ### Features + - Add ConflictExceptions to UpdateScope and DeleteScope operations for scopes being mutated. + +# __2.31.73__ __2025-06-27__ +## __AWS Config__ + - ### Features + - Added important considerations to the PutConformancePack and PutOrganizationConformancPack APIs. + +## __AWS Glue__ + - ### Features + - AWS Glue now supports schema, partition and sort management of Apache Iceberg tables using Glue SDK + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Connect Service__ + - ### Features + - This release adds the following value to an InitiateAs enum: COMPLETED + +## __Amazon GuardDuty__ + - ### Features + - Update JSON target for Kubernetes workload resource type. 
+ +## __Amazon Q Connect__ + - ### Features + - Adding UnauthorizedException to public SDK + +## __Amazon Relational Database Service__ + - ### Features + - StartDBCluster and StopDBCluster can now throw InvalidDBShardGroupStateFault. + +## __Amazon Simple Email Service__ + - ### Features + - Added support for new SES regions + +# __2.31.72__ __2025-06-26__ +## __AWS Key Management Service__ + - ### Features + - This release updates AWS CLI examples for KMS APIs. + +## __AWS SDK for Java v2__ + - ### Features + - Add code generation validation for missing request URI on an operation. + - Add support for defining service model validators and generating valdiation reports during code generation. + - Add support for validating that shared models between two services are identical. + - Updated endpoint and partition metadata. + +## __AWSDeadlineCloud__ + - ### Features + - Added fields to track cumulative task retry attempts for steps and jobs + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for OdbNetworkArn as a target in VPC Route Tables + +## __Amazon Keyspaces__ + - ### Features + - This release provides change data capture (CDC) streams support through updates to the Amazon Keyspaces API. + +## __Amazon Keyspaces Streams__ + - ### Features + - This release adds change data capture (CDC) streams support through the new Amazon Keyspaces Streams API. + +## __Amazon WorkSpaces__ + - ### Features + - Updated modifyStreamingProperties to support PrivateLink VPC endpoints for directories + +## __Code Generator Maven Plugin__ + - ### Features + - Update the generator plugin to support model validation during code generation. In addition, this adds the `writeValidationReport` flag to support writing the validation report to disk. 
+ +## __EmfMetricLoggingPublisher__ + - ### Bugfixes + - Fixed the bug that EmfMetricLoggingPublisher not properly publishing Long type metrics + +## __Managed integrations for AWS IoT Device Management__ + - ### Features + - Adding managed integrations APIs for IoT Device Management to onboard and control devices across different manufacturers, connectivity protocols and third party vendor clouds. APIs include managed thing operations, provisioning profile management, and cloud connector operations. + +## __QBusiness__ + - ### Features + - Added support for App level authentication for QBusiness DataAccessor using AWS IAM Identity center Trusted Token issuer + +# __2.31.71__ __2025-06-25__ +## __AWS S3 Control__ + - ### Features + - Add support for the ability to use Amazon S3 Access Points with Amazon FSx for OpenZFS file systems. + +## __AWS SDK for Java v2__ + - ### Features + - Adding constant to each SDK module to represent its version + +## __AWS Storage Gateway__ + - ### Features + - This release adds IPv6 support to the Storage Gateway APIs. APIs that previously only accept or return IPv4 address will now accept or return both IPv4 and IPv6 addresses. + +## __Amazon EC2 Container Service__ + - ### Features + - Updates for change to Amazon ECS default log driver mode from blocking to non-blocking + +## __Amazon FSx__ + - ### Features + - Add support for the ability to create Amazon S3 Access Points for Amazon FSx for OpenZFS file systems. 
+ +## __Amazon Simple Storage Service__ + - ### Features + - Adds support for additional server-side encryption mode and storage class values for accessing Amazon FSx data from Amazon S3 using S3 Access Points + +## __Amazon Textract__ + - ### Features + - Add RotationAngle field to Geometry of WORD blocks for Textract AnalyzeDocument API + +## __Amazon WorkSpaces Thin Client__ + - ### Features + - Remove Tags field from Get API responses + +# __2.31.70__ __2025-06-24__ +## __AWS AI Ops__ + - ### Features + - Adds support for cross account investigations for CloudWatch investigations AI Operations (AIOps). + +## __AWS Batch__ + - ### Features + - Add userdataType to LaunchTemplateSpecification and LaunchTemplateSpecificationOverride. + +## __AWS License Manager__ + - ### Features + - AWS License Manager now supports license type conversions for AWS Marketplace products. Customers can provide Marketplace codes in the source license context or destination license context in the CreateLicenseConversionTaskForResource requests. + +## __Amazon Bedrock__ + - ### Features + - We are making ListFoundationModelAgreementOffers, DeleteFoundationModelAgreement, CreateFoundationModelAgreement, GetFoundationModelAvailability, PutUseCaseForModelAccess and GetUseCaseForModelAccess APIs public, previously they were console. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release allows you to create and register AMIs while maintaining their underlying EBS snapshots within Local Zones. + +## __Amazon GameLift__ + - ### Features + - Add support for UDP ping beacons to ListLocations API, including new PingBeacon and UDPEndpoint data types within its Locations return value. Use UDP ping beacon endpoints to help measure real-time network latency for multiplayer games. 
+ +## __Amazon Relational Database Service__ + - ### Features + - Adding support for RDS on Dedicated Local Zones, including local backup target, snapshot availability zone and snapshot target + +## __Amazon Route 53 Resolver__ + - ### Features + - Add support for iterative DNS queries through the new INBOUND_DELEGATION endpoint. Add delegation support through the Outbound Endpoints with DELEGATE rules. + +## __Amazon Transcribe Service__ + - ### Features + - This Feature Adds Support for the "et-EE" Locale for Batch Operations + +## __Elastic Load Balancing__ + - ### Features + - Add Paginator for DescribeAccountLimits, and fix Paginators for DescribeTrustStoreAssociations, DescribeTrustStoreRevocations, and DescribeTrustStores + +# __2.31.69__ __2025-06-23__ +## __AWS Glue__ + - ### Features + - AWS Glue now supports sort and z-order strategy for managed automated compaction for Iceberg tables in addition to binpack. + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Ignore unknown properties on endpoints in endpoint rules. + +## __Amazon S3 Tables__ + - ### Features + - S3 Tables now supports sort and z-order compaction strategies for Iceberg tables in addition to binpack. + +## __Amazon Workspaces Instances__ + - ### Features + - Added support for Amazon WorkSpaces Instances API + +# __2.31.68__ __2025-06-20__ +## __AWS Elemental MediaConvert__ + - ### Features + - This release adds a new SPECIFIED_OPTIMAL option for handling DDS when using DVB-Sub with high resolution video. + +## __AWS Glue__ + - ### Features + - AWS Glue Data Quality now provides aggregated metrics in evaluation results when publishAggregatedMetrics with row-level results are enabled. These metrics include summary statistics showing total counts of processed, passed, and failed rows and rules in a single view. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Bedrock__ + - ### Features + - Add support for tiers in Content Filters and Denied Topics for Amazon Bedrock Guardrails. + +## __Amazon EC2 Container Service__ + - ### Features + - Add ECS support for Windows Server 2025 + +## __Amazon Location Service Places V2__ + - ### Features + - Geocode, ReverseGeocode, and GetPlace APIs added Intersections and SecondaryAddresses. To use, add to the AdditionalFeatures list in your request. This provides info about nearby intersections and secondary addresses that are associated with a main address. Also added MainAddress and ParsedQuery. + +# __2.31.67__ __2025-06-19__ +## __AWS Lambda__ + - ### Features + - Support Schema Registry feature for Kafka Event Source Mapping. Customers can now configure a Schema Registry to enable schema validation and filtering for Avro, Protobuf, and JSON-formatted events in Lambda for Kafka Event Source. + +## __Amazon Bedrock__ + - ### Features + - This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a trained model into Amazon Bedrock for inference. + +## __Amazon SageMaker Service__ + - ### Features + - This release introduces alternative support for utilizing CFN templates from S3 for SageMaker Projects. + +## __EMR Serverless__ + - ### Features + - This release adds the capability to enable IAM IdentityCenter Trusted Identity Propagation for users running Interactive Sessions on EMR Serverless Applications. 
+ +## __Payment Cryptography Control Plane__ + - ### Features + - Additional support for managing HMAC keys that adheres to changes documented in X9.143-2021 and provides better interoperability for key import/export + +## __Payment Cryptography Data Plane__ + - ### Features + - Additional support for managing HMAC keys that adheres to changes documented in X9.143-2021 and provides better interoperability for key import/export + +# __2.31.66__ __2025-06-18__ +## __AWS AI Ops__ + - ### Features + - This is the initial SDK release for Amazon AI Operations (AIOps). AIOps is a generative AI-powered assistant that helps you respond to incidents in your system by scanning your system's telemetry and quickly surface suggestions that might be related to your issue. + +## __AWS S3__ + - ### Features + - Adds the ability to presign HeadObject and HeadBucket requests with the S3 Presigner + - Contributed by: [@tmccombs](https://github.com/tmccombs) + +## __AWS SDK for Java v2__ + - ### Features + - Adding a new method of constructing ARNs without exceptions as control flow + - Updated endpoint and partition metadata. + +## __Amazon CloudWatch Logs__ + - ### Features + - Added CloudWatch Logs Transformer support for converting CloudTrail, VPC Flow, EKS Audit, AWS WAF and Route53 Resolver logs to OCSF v1.1 format. + +## __Amazon SageMaker Service__ + - ### Features + - Add support for p6-b200 instance type for SageMaker Hyperpod + +## __Amazon Simple Storage Service__ + - ### Features + - Added support for renaming objects within the same bucket using the new RenameObject API. 
+ +## __Auto Scaling__ + - ### Features + - Add IncludeInstances parameter to DescribeAutoScalingGroups API + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@tmccombs](https://github.com/tmccombs) +# __2.31.65__ __2025-06-17__ +## __AWS Backup__ + - ### Features + - AWS Backup is adding support for integration of its logically air-gapped vaults with the AWS Organizations Multi-party approval capability. + +## __AWS Certificate Manager__ + - ### Features + - Adds support for Exportable Public Certificates + +## __AWS Database Migration Service__ + - ### Features + - Add "Virtual" field to Data Provider as well as "S3Path" and "S3AccessRoleArn" fields to DataProvider settings + +## __AWS Multi-party Approval__ + - ### Features + - This release enables customers to create Multi-party approval teams and approval requests to protect supported operations. + +## __AWS Network Firewall__ + - ### Features + - Release of Active Threat Defense in Network Firewall + +## __AWS Organizations__ + - ### Features + - Add support for policy operations on the SECURITYHUB_POLICY policy type. + +## __AWS SDK for Java V2__ + - ### Bugfixes + - Fix a bug in ConstructorCache when classes are GC'ed but not removed from cache + +## __AWS Security Token Service__ + - ### Features + - The AWS Security Token Service APIs AssumeRoleWithSAML and AssumeRoleWithWebIdentity can now be invoked without pre-configured AWS credentials in the SDK configuration. + +## __AWS SecurityHub__ + - ### Features + - Adds operations, structures, and exceptions required for public preview release of Security Hub V2. + +## __AWS WAFV2__ + - ### Features + - AWS WAF can now suggest protection packs for you based on the application information you provide when you create a webACL. + +## __Access Analyzer__ + - ### Features + - We are launching a new analyzer type, internal access analyzer. 
The new analyzer will generate internal access findings, which help customers understand who within their AWS organization or AWS Account has access to their critical AWS resources. + +## __Amazon Bedrock__ + - ### Features + - This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a trained model into Amazon Bedrock for inference. + +## __Amazon GuardDuty__ + - ### Features + - Adding support for extended threat detection for EKS Audit Logs and EKS Runtime Monitoring. + +## __Inspector2__ + - ### Features + - Add Code Repository Scanning as part of AWS InspectorV2 + +# __2.31.64__ __2025-06-16__ +## __AWS Network Firewall__ + - ### Features + - You can now create firewalls using a Transit Gateway instead of a VPC, resulting in a TGW attachment. + +## __AWS SDK for Java v2__ + - ### Features + - Add tracking of RequestBody/ResponseTransfromer implementations used in UserAgent. + +## __Amazon Bedrock__ + - ### Features + - This release of the SDK has the API and documentation for the createcustommodel API. This feature lets you copy a Amazon SageMaker trained Amazon Nova model into Amazon Bedrock for inference. + +## __Amazon Elastic Container Registry__ + - ### Features + - The `DescribeImageScanning` API now includes `lastInUseAt` and `InUseCount` fields that can be used to prioritize vulnerability remediation for images that are actively being used. + +## __Amazon SageMaker Service__ + - ### Features + - This release 1) adds a new S3DataType Converse for SageMaker training 2)adds C8g R7gd M8g C6in P6 P6e instance type for SageMaker endpoint 3) adds m7i, r7i, c7i instance type for SageMaker Training and Processing. 
+ +# __2.31.63__ __2025-06-12__ +## __AWS IoT FleetWise__ + - ### Features + - Add new status READY_FOR_CHECKIN used for vehicle synchronisation + +## __AWS Key Management Service__ + - ### Features + - AWS KMS announces the support of ML-DSA key pairs that creates post-quantum safe digital signatures. + +## __AWS Parallel Computing Service__ + - ### Features + - Fixed regex patterns for ARN fields. + +## __Amazon API Gateway__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __Amazon EC2 Container Service__ + - ### Features + - This Amazon ECS release supports updating the capacityProviderStrategy parameter in update-service. + +## __AmazonApiGatewayV2__ + - ### Features + - Documentation updates for Amazon API Gateway + +## __AmazonConnectCampaignServiceV2__ + - ### Features + - Added PutInstanceCommunicationLimits and GetInstanceCommunicationLimits APIs + +## __EMR Serverless__ + - ### Features + - This release adds support for retrieval of the optional executionIamPolicy field in the GetJobRun API response. + +# __2.31.62__ __2025-06-11__ +## __AWS Control Catalog__ + - ### Features + - Introduced ListControlMappings API that retrieves control mappings. Added control aliases and governed resources fields in GetControl and ListControls APIs. New filtering capability in ListControls API, with implementation identifiers and implementation types. + +## __AWS Network Manager__ + - ### Features + - Add support for public DNS hostname resolution to private IP addresses across Cloud WAN-managed VPCs. Add support for security group referencing across Cloud WAN-managed VPCs. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ + - ### Bugfixes + - Deprecated DefaultCredentialsProvider.create() since it creates Singleton instance + - Contributed by: [@jencymaryjoseph](https://github.com/jencymaryjoseph) + +## __AWS WAFV2__ + - ### Features + - WAF now provides two DDoS protection options: resource-level monitoring for Application Load Balancers and the AWSManagedRulesAntiDDoSRuleSet managed rule group for CloudFront distributions. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Release for EKS Pod Identity Cross Account feature and disableSessionTags flag. + +## __Amazon Lex Model Building V2__ + - ### Features + - Add support for the Assisted NLU feature to improve bot performance + +## __Amazon Relational Database Service__ + - ### Features + - Updates Amazon RDS documentation for Amazon RDS for Db2 cross-Region replicas in standby mode. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@jencymaryjoseph](https://github.com/jencymaryjoseph) +# __2.31.61__ __2025-06-10__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon GameLift Streams__ + - ### Features + - Documentation updates for Amazon GameLift Streams to address formatting errors, correct resource ID examples, and update links to other guides + +# __2.31.60__ __2025-06-09__ +## __AWS AppSync__ + - ### Features + - Deprecate `atRestEncryptionEnabled` and `transitEncryptionEnabled` attributes in `CreateApiCache` action. Encryption is always enabled for new caches. + +## __AWS Cost Explorer Service__ + - ### Features + - Support dual-stack endpoints for ce api + +## __AWS Marketplace Catalog Service__ + - ### Features + - The ListEntities API now supports the EntityID, LastModifiedDate, ProductTitle, and Visibility filters for machine learning products. You can also sort using all of those filters. 
+ +## __AWS SDK for Java v2__ + - ### Features + - Adds support for configuring bearer auth using a token sourced from the environment for services with the `enableEnvironmentBearerToken` customization flag. + - Updated Region class generation to use Partitions.json instead of the Endpoints.json and removed the hardcoded global regions. + - Updated endpoint and partition metadata. + +## __Amazon Connect Customer Profiles__ + - ### Features + - This release introduces capability of Profile Explorer, using correct ingestion timestamp & using historical data for computing calculated attributes, and new standard objects for T&H as part of Amazon Connect Customer Profiles service. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types. + +## __Amazon Elastic File System__ + - ### Features + - Added support for Internet Protocol Version 6 (IPv6) on EFS Service APIs and mount targets. + +## __Amazon WorkSpaces Thin Client__ + - ### Features + - Add ConflictException to UpdateEnvironment API + +# __2.31.59__ __2025-06-06__ +## __AWS Key Management Service__ + - ### Features + - Remove unpopulated KeyMaterialId from Encrypt Response + +## __AWS SDK for Java v2__ + - ### Features + - Add support for protocols field in service model + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix expiration in past warning during profile credential loading. + +## __Agents for Amazon Bedrock Runtime__ + - ### Features + - This release introduces the `PromptCreationConfigurations` input parameter, which includes fields to control prompt population for `InvokeAgent` or `InvokeInlineAgent` requests. + +## __Amazon Rekognition__ + - ### Features + - Adds support for defining an ordered preference list of different Rekognition Face Liveness challenge types when calling CreateFaceLivenessSession. 
+ +## __Amazon Relational Database Service__ + - ### Features + - Include Global Cluster Identifier in DBCluster if the DBCluster is a Global Cluster Member. + +## __Amazon Route 53__ + - ### Features + - Amazon Route 53 now supports the Asia Pacific (Taipei) Region (ap-east-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +## __Amazon S3 Tables__ + - ### Features + - S3 Tables now supports getting details about a table via its table ARN. + +# __2.31.58__ __2025-06-05__ +## __AWS Billing and Cost Management Pricing Calculator__ + - ### Features + - Updating the minimum for List APIs to be 1 (instead of 0) + +## __AWS CloudFormation__ + - ### Features + - Add new warning type 'EXCLUDED_PROPERTIES' + +## __AWS Key Management Service__ + - ### Features + - AWS KMS announces the support for on-demand rotation of symmetric-encryption KMS keys with imported key material (EXTERNAL origin). + +## __AWS SDK for Java v2__ + - ### Features + - Added ability to configure preferred authentication schemes when multiple auth options are available. + - Updated endpoint and partition metadata. + +## __AWS WAFV2__ + - ### Features + - AWS WAF adds support for ASN-based traffic filtering and support for ASN-based rate limiting. + +## __Amazon DynamoDB Enhanced Client__ + - ### Bugfixes + - Fixed DynamoDbEnhancedClient DefaultDynamoDbAsyncTable::createTable() to create secondary indices that are defined on annotations of the POJO class, similar to DefaultDynamoDbTable::createTable(). + +# __2.31.57__ __2025-06-04__ +## __AWS Amplify__ + - ### Features + - Update documentation for cacheConfig in CreateApp API + +## __AWS Elemental MediaConvert__ + - ### Features + - This release includes support for embedding and signing C2PA content credentials in MP4 outputs. 
+ +## __AWS Invoicing__ + - ### Features + - Added new Invoicing ListInvoiceSummaries API Operation + +## __AWS MediaConnect__ + - ### Features + - This release updates the DescribeFlow API to show peer IP addresses. You can now identify the peer IP addresses of devices connected to your sources and outputs. This helps you to verify and troubleshoot your flow's active connections. + +## __AWS Network Firewall__ + - ### Features + - You can now monitor flow and alert log metrics from the Network Firewall console. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic VMware Service__ + - ### Features + - Amazon Elastic VMware Service (Amazon EVS) allows you to run VMware Cloud Foundation (VCF) directly within your Amazon VPC including simplified self-managed migration experience with guided workflow in AWS console or via AWS CLI, get full access to their VCF deployment and VCF license portability. + +## __Amazon SageMaker Service__ + - ### Features + - Added support for p6-b200 instance type in SageMaker Training Jobs and Training Plans. 
+ +## __Amazon Transcribe Service__ + - ### Features + - AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP + +## __Amazon Transcribe Streaming Service__ + - ### Features + - AWS Healthscribe now supports new templates for the clinical note summary: BIRP, SIRP, DAP, BEHAVIORAL_SOAP, and PHYSICAL_SOAP + +## __S3 Transfer Manager__ + - ### Bugfixes + - DownloadFilter type incompatability methods overriden from extended interface + - Contributed by: [@jencymaryjoseph](https://github.com/jencymaryjoseph) + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@jencymaryjoseph](https://github.com/jencymaryjoseph) +# __2.31.56__ __2025-06-03__ +## __AWS S3 Event Notifications__ + - ### Bugfixes + - Fixed parsing of S3 event notifications to allow eventTime to be null when eventName is not + - Contributed by: [@reifiedbeans](https://github.com/reifiedbeans) + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Fix NPE in `ProfileFileSupplier.defaultSupplier` when both credentials and config files do not exist. + +## __Amazon API Gateway__ + - ### Features + - Adds support to set the routing mode for a custom domain name. + +## __AmazonApiGatewayV2__ + - ### Features + - Adds support to create routing rules and set the routing mode for a custom domain name. + +## __EMR Serverless__ + - ### Features + - AWS EMR Serverless: Adds a new option in the CancelJobRun API in EMR 7.9.0+, to cancel a job with grace period. This feature is enabled by default with a 120-second grace period for streaming jobs and is not enabled by default for batch jobs. + +## __Contributors__ +Special thanks to the following contributors to this release: + +[@reifiedbeans](https://github.com/reifiedbeans) +# __2.31.55__ __2025-06-02__ +## __AWS Backup__ + - ### Features + - You can now subscribe to Amazon SNS notifications and Amazon EventBridge events for backup indexing. 
You can now receive notifications when a backup index is created, deleted, or fails to create, enhancing your ability to monitor and track your backup operations. + +## __AWS Compute Optimizer__ + - ### Features + - This release enables AWS Compute Optimizer to analyze Amazon Aurora database clusters and generate Aurora I/O-Optimized recommendations. + +## __AWS EntityResolution__ + - ### Features + - Add support for generating match IDs in near real-time. + +## __AWS Parallel Computing Service__ + - ### Features + - Introduces SUSPENDING and SUSPENDED states for clusters, compute node groups, and queues. + +## __AWS SDK for Java v2__ + - ### Features + - Improve the endpoint rules performance by directly passing the needed params instead of using a POJO to keep track of them. + - Updated endpoint and partition metadata. + +## __Agents for Amazon Bedrock__ + - ### Features + - This release adds the Agent Lifecycle Paused State feature to Amazon Bedrock agents. By using an agent's alias, you can temporarily suspend agent operations during maintenance, updates, or other situations. + +## __Amazon Athena__ + - ### Features + - Add support for the managed query result in the workgroup APIs. The managed query result configuration enables users to store query results to Athena owned storage. + +## __Amazon EC2 Container Service__ + - ### Features + - Updates Amazon ECS documentation to include note for upcoming default log driver mode change. + +## __Amazon Elastic Kubernetes Service__ + - ### Features + - Add support for filtering ListInsights API calls on MISCONFIGURATION insight category + +## __Cost Optimization Hub__ + - ### Features + - Support recommendations for Aurora instance and Aurora cluster storage. + +## __Synthetics__ + - ### Features + - Support for Java runtime handler pattern. + +# __2.31.54__ __2025-05-30__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon SageMaker Service__ + - ### Features + - Release new parameter CapacityReservationConfig in ProductionVariant + +## __EMR Serverless__ + - ### Features + - This release adds the capability for users to specify an optional Execution IAM policy in the StartJobRun action. The resulting permissions assumed by the job run is the intersection of the permissions in the Execution Role and the specified Execution IAM Policy. + +# __2.31.53__ __2025-05-29__ +## __AWS Amplify__ + - ### Features + - Add support for customizable build instance sizes. CreateApp and UpdateApp operations now accept a new JobConfig parameter composed of BuildComputeType. + +## __AWS Billing and Cost Management Pricing Calculator__ + - ### Features + - Add AFTER_DISCOUNTS_AND_COMMITMENTS to Workload Estimate Rate Type. Set ListWorkLoadEstimateUsage maxResults range to minimum of 0 and maximum of 300. + +## __AWS CloudTrail__ + - ### Features + - CloudTrail Feature Release: Support for Enriched Events with Configurable Context for Event Data Store + +## __AWS Data Exchange__ + - ### Features + - This release adds Tag support for Event Action resource, through which customers can create event actions with Tags and retrieve event actions with Tags. + +## __AWS DataSync__ + - ### Features + - AgentArns field is made optional for Object Storage and Azure Blob location create requests. Location credentials are now managed via Secrets Manager, and may be encrypted with service managed or customer managed keys. Authentication is now optional for Azure Blob locations. 
+ +## __Amazon Connect Service__ + - ### Features + - Amazon Connect Service Feature: Email Recipient Limit Increase + +## __Amazon FSx__ + - ### Features + - FSx API changes to support the public launch of new Intelligent Tiering storage class on Amazon FSx for Lustre + +## __Amazon Interactive Video Service RealTime__ + - ### Features + - IVS Real-Time now offers participant replication, which allows customers to copy a participant from one stage to another. + +## __Amazon SageMaker Service__ + - ### Features + - Add maintenance status field to DescribeMlflowTrackingServer API response + +## __Amazon Simple Storage Service__ + - ### Features + - Adding checksum support for S3 PutBucketOwnershipControls API. + +## __AmazonMWAA__ + - ### Features + - Amazon MWAA now lets you choose a worker replacement strategy when updating an environment. This release adds two worker replacement strategies: FORCED (default), which stops workers immediately, and GRACEFUL, which allows workers to finish current tasks before shutting down. + +## __Auto Scaling__ + - ### Features + - Add support for "apple" CpuManufacturer in ABIS + +# __2.31.52__ __2025-05-28__ +## __AWS Network Firewall__ + - ### Features + - You can now use VPC endpoint associations to create multiple firewall endpoints for a single firewall. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs) + +## __Amazon EventBridge__ + - ### Features + - Allow for more than 2 characters for location codes in EventBridge ARNs + +## __Cost Optimization Hub__ + - ### Features + - This release allows customers to modify their preferred commitment term and payment options. + +## __Synthetics__ + - ### Features + - Add support to change ephemeral storage. 
Add a new field "TestResult" under CanaryRunStatus. + +# __2.31.51__ __2025-05-27__ +## __AWS Cost Explorer Service__ + - ### Features + - This release introduces Cost Comparison feature (GetCostAndUsageComparisons, GetCostComparisonDrivers) allowing you find cost variations across multiple dimensions and identify key drivers of spending changes. + +## __AWSDeadlineCloud__ + - ### Features + - AWS Deadline Cloud service-managed fleets now support storage profiles. With storage profiles, you can map file paths between a workstation and the worker hosts running the job. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds three features - option to store AWS Site-to-Site VPN pre-shared keys in AWS Secrets Manager, GetActiveVpnTunnelStatus API to check the in-use VPN algorithms, and SampleType option in GetVpnConnectionDeviceSampleConfiguration API to get recommended sample configs for VPN devices. + +# __2.31.50__ __2025-05-23__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Fix CompletableFuture hanging when RetryStrategy/MetricsCollector raise errors + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release adds support for the C7i-flex, M7i-flex, I7i, I7ie, I8g, P6-b200, Trn2, C8gd, M8gd and R8gd instances + +## __Netty NIO HTTP Client__ + - ### Bugfixes + - Enable Netty HTTP header validation when connecting with proxy + +## __Security Incident Response__ + - ### Features + - Update PrincipalId pattern documentation to reflect what user should receive back from the API call + +# __2.31.49__ __2025-05-22__ +## __AWS Audit Manager__ + - ### Features + - With this release, the AssessmentControl description field has been deprecated, as of May 19, 2025. Additionally, the UpdateAssessment API can now return a ServiceQuotaExceededException when applicable service quotas are exceeded. 
+ +## __AWS Glue__ + - ### Features + - This release supports additional ConversionSpec parameter as part of IntegrationPartition Structure in CreateIntegrationTableProperty API. This parameter is referred to apply appropriate column transformation for columns that are used for timestamp based partitioning + +## __AWS SDK for Java v2__ + - ### Bugfixes + - Update non-streaming error unmarshalling to properly unmarshall exceptions to their expected types. + +## __Amazon Aurora DSQL__ + - ### Features + - Features: support for customer managed encryption keys + +## __Amazon Prometheus Service__ + - ### Features + - Add QueryLoggingConfiguration APIs for Amazon Managed Prometheus + +# __2.31.48__ __2025-05-21__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Agents for Amazon Bedrock Runtime__ + - ### Features + - Amazon Bedrock introduces asynchronous flows (in preview), which let you run flows for longer durations and yield control so that your application can perform other tasks and you don't have to actively monitor the flow's progress. + +## __Amazon CloudWatch__ + - ### Features + - Adds support for setting up Contributor Insight rules on logs transformed via Logs Transformation feature. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - Release of Dualstack and Ipv6-only EC2 Public DNS hostnames + +## __Application Auto Scaling__ + - ### Features + - Doc only update that addresses a customer reported issue. + +## __Partner Central Selling API__ + - ### Features + - Modified validation to allow expectedCustomerSpend array with zero elements in Partner Opportunity operations. + +# __2.31.47__ __2025-05-20__ +## __AWS DataSync__ + - ### Features + - Remove Discovery APIs from the DataSync service + +## __AWS Glue__ + - ### Features + - Enhanced AWS Glue ListConnectionTypes API Model with additional metadata fields. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release expands the ModifyInstanceMaintenanceOptions API to enable or disable instance migration during customer-initiated reboots for EC2 Scheduled Reboot Events. + +## __Amazon Relational Database Service__ + - ### Features + - This release introduces the new DescribeDBMajorEngineVersions API for describing the properties of specific major versions of database engines. + +## __CloudWatch Observability Access Manager__ + - ### Features + - Add IncludeTags field to GetLink, GetSink and UpdateLink API + +## __Inspector2__ + - ### Features + - This release adds GetClustersForImage API and filter updates as part of the mapping of container images to running containers feature. + +# __2.31.46__ __2025-05-19__ +## __AWS Elemental MediaPackage v2__ + - ### Features + - This release adds support for DVB-DASH, EBU-TT-D subtitle format, and non-compacted manifests for DASH in MediaPackage v2 Origin Endpoints. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Aurora DSQL__ + - ### Features + - CreateMultiRegionCluster and DeleteMultiRegionCluster APIs removed + +## __Amazon Elastic Compute Cloud__ + - ### Features + - This release includes new APIs for System Integrity Protection (SIP) configuration and automated root volume ownership delegation for EC2 Mac instances. + +# __2.31.45__ __2025-05-16__ +## __AWS CodePipeline__ + - ### Features + - CodePipeline now supports new API ListDeployActionExecutionTargets that lists the deployment target details for deploy action executions. + +## __AWS Glue__ + - ### Features + - Changes include (1) Excel as S3 Source type and XML and Tableau's Hyper as S3 Sink types, (2) targeted number of partitions parameter in S3 sinks and (3) new compression types in CSV/JSON and Parquet S3 sinks. + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. 
+ + - ### Bugfixes + - Fix a regression for the JSON REST protocol for which a structure's explicit payload member was set to the empty object instead of null + +## __Amazon EC2 Container Service__ + - ### Features + - This is an Amazon ECS documentation only release to support the change of the container exit "reason" field from 255 characters to 1024 characters. + +## __Amazon EMR__ + - ### Features + - Added APIs for managing Application UIs: Access Persistent (serverless) UIs via CreatePersistentAppUI DescribePersistentAppUI & GetPersistentAppUIPresignedURL, and Cluster-based UIs through GetOnClusterAppUIPresignedURL. Supports Yarn, Spark History, and TEZ interfaces. + +## __Amazon Neptune__ + - ### Features + - This release adds Global Cluster Switchover capability which enables you to change your global cluster's primary AWS Region, the region that serves writes, while preserving the replication between all regions in the global cluster. + +## __Data Automation for Amazon Bedrock__ + - ### Features + - Add support for VIDEO modality to BlueprintType enum. + +## __Runtime for Amazon Bedrock Data Automation__ + - ### Features + - Add AssetProcessingConfiguration for video segment to InputConfiguration + +## __Service Quotas__ + - ### Features + - This release introduces CreateSupportCase operation to SDK. + +# __2.31.44__ __2025-05-15__ +## __AWS CodeBuild__ + - ### Features + - AWS CodeBuild now supports Docker Server capability + +## __AWS Control Tower__ + - ### Features + - Updated the descriptions for the AWS Control Tower Baseline APIs to make them more intuitive. + +## __AWS Database Migration Service__ + - ### Features + - Introduces Data Resync feature to describe-table-statistics and IAM database authentication for MariaDB, MySQL, and PostgreSQL. + +## __AWS Parallel Computing Service__ + - ### Features + - This release adds support for Slurm accounting. For more information, see the Slurm accounting topic in the AWS PCS User Guide. 
Slurm accounting is supported for Slurm 24.11 and later. This release also adds 24.11 as a valid value for the version parameter of the Scheduler data type. + +## __AWS SDK for Java v2__ + - ### Features + - Small optimization for endpoint rules. Lazily compile the region pattern instead of parsing it every time. This will pay the penalty of parsing it just once at the cost of using a bit more of memory to keep the parsed pattern. + +## __Agents for Amazon Bedrock__ + - ### Features + - Amazon Bedrock Flows introduces DoWhile loops nodes, parallel node executions, and enhancements to knowledge base nodes. + +## __Amazon WorkSpaces__ + - ### Features + - Added the new AlwaysOn running mode for WorkSpaces Pools. Customers can now choose between AlwaysOn (for instant access, with hourly usage billing regardless of connection status), or AutoStop (to optimize cost, with a brief startup delay) for their pools. + +# __2.31.43__ __2025-05-14__ +## __AWS Elemental MediaConvert__ + - ### Features + - This update enables cropping for video overlays and adds a new STL to Teletext upconversion toggle to preserve styling. + +## __Amazon CloudWatch Logs__ + - ### Features + - This release adds a new API "ListLogGroups" and an improvement in API "DescribeLogGroups" + +## __Amazon Cognito Identity Provider__ + - ### Features + - Add exceptions to WebAuthn operations. + +## __Amazon Kinesis Firehose__ + - ### Features + - This release adds catalogARN support for s3 tables multi-catalog catalogARNs. + +# __2.31.42__ __2025-05-13__ +## __AWS Control Tower__ + - ### Features + - AWS Control Tower now reports the inheritance drift status for EnabledBaselines through the GetEnabledBaseline and ListEnabledBaselines APIs. You can now filter EnabledBaselines by their enablement and drift status using the ListEnabledBaselines API to view accounts and OUs that require attention. 
+ +## __AWS License Manager__ + - ### Features + - Add Tagging feature to resources in the Managed Entitlements service. License and Grant resources can now be tagged. + +## __Agents for Amazon Bedrock Runtime__ + - ### Features + - Changes for enhanced metadata in trace + +## __Amazon Aurora DSQL__ + - ### Features + - CreateMultiRegionClusters and DeleteMultiRegionClusters APIs marked as deprecated. Introduced new multi-Region clusters creation experience through multiRegionProperties parameter in CreateCluster API. + +## __Amazon Bedrock__ + - ### Features + - Enable cross-Region inference for Amazon Bedrock Guardrails by using the crossRegionConfig parameter when calling the CreateGuardrail or UpdateGuardrail operation. + +## __Amazon EC2 Container Service__ + - ### Features + - This release extends functionality for Amazon EBS volumes attached to Amazon ECS tasks by adding support for the new EBS volumeInitializationRate parameter in ECS RunTask/StartTask/CreateService/UpdateService APIs. + +# __2.31.41__ __2025-05-12__ +## __AWS Elemental MediaLive__ + - ### Features + - Add support to the AV1 rate control mode + +## __AWS Identity and Access Management__ + - ### Features + - Updating the endpoint list for the Identity and access management (IAM) service + +## __AWS MediaTailor__ + - ### Features + - Documenting that EnabledLoggingStrategies is always present in responses of PlaybackConfiguration read operations. + +## __AWS S3 Control__ + - ### Features + - Updates to support S3 Express zonal endpoints for directory buckets in AWS CLI + +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + + - ### Bugfixes + - Add `@Mutable` and `@NotThreadSafe` to model Builders + +## __AWS Supply Chain__ + - ### Features + - Launch new AWS Supply Chain public APIs for DataIntegrationEvent, DataIntegrationFlowExecution and DatasetNamespace. 
Also add more capabilities to existing public APIs to support direct dataset event publish, data deduplication in DataIntegrationFlow, partition specification of custom datasets. + +## __AWSDeadlineCloud__ + - ### Features + - AWS Deadline Cloud service-managed fleets now support configuration scripts. Configuration scripts make it easy to install additional software, like plugins and packages, onto a worker. + +## __Amazon Elastic Compute Cloud__ + - ### Features + - EC2 - Adding support for AvailabilityZoneId + +## __Amazon SageMaker Service__ + - ### Features + - No API changes from previous release. This release migrated the model to Smithy keeping all features unchanged. + +# __2.31.40__ __2025-05-09__ +## __AWS SDK for Java v2__ + - ### Features + - Updated endpoint and partition metadata. + +## __Amazon Athena__ + - ### Features + - Minor API documentation updates + +## __Amazon CloudWatch Logs__ + - ### Features + - We are pleased to announce limit increases to our grok processor logs transformation feature. Now you can define 20 Grok patterns in their configurations, with an expanded total pattern matching limit of 512 characters. + +## __Amazon WorkSpaces__ + - ### Features + - Remove parameter EnableWorkDocs from WorkSpacesServiceModel due to end of support of Amazon WorkDocs service. + +## __Synthetics__ + - ### Features + - Add support to retry a canary automatically after schedule run failures. Users can enable this feature by configuring the RetryConfig field when calling the CreateCanary or UpdateCanary API. Also includes changes in GetCanary and GetCanaryRuns to support retrieving retry configurations. 
+ # __2.31.39__ __2025-05-08__ ## __AWS CodePipeline__ - ### Features diff --git a/README.md b/README.md index b947c1f42261..138e78fe5d87 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![Maven](https://img.shields.io/maven-central/v/software.amazon.awssdk/s3.svg?label=Maven)](https://search.maven.org/search?q=g:%22software.amazon.awssdk%22%20AND%20a:%22s3%22) [![codecov](https://codecov.io/gh/aws/aws-sdk-java-v2/branch/master/graph/badge.svg)](https://codecov.io/gh/aws/aws-sdk-java-v2) -[![All Contributors](https://img.shields.io/badge/all_contributors-117-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-119-orange.svg?style=flat-square)](#contributors-) The **AWS SDK for Java 2.0** is a rewrite of 1.0 with some great new features. As with version 1.0, @@ -51,7 +51,7 @@ To automatically manage module versions (currently all modules have the same ver software.amazon.awssdk bom - 2.31.39 + 2.31.75 pom import @@ -85,12 +85,12 @@ Alternatively you can add dependencies for the specific services you use only: software.amazon.awssdk ec2 - 2.31.39 + 2.31.75 software.amazon.awssdk s3 - 2.31.39 + 2.31.75 ``` @@ -102,7 +102,7 @@ You can import the whole SDK into your project (includes *ALL* services). Please software.amazon.awssdk aws-sdk-java - 2.31.39 + 2.31.75 ``` @@ -340,6 +340,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Ran Vaknin
Ran Vaknin

💻 Greg Oledzki
Greg Oledzki

💻 Kevin Stich
Kevin Stich

💻 + Jency Joseph
Jency Joseph

💻 + Drew Davis
Drew Davis

💻 diff --git a/archetypes/archetype-app-quickstart/pom.xml b/archetypes/archetype-app-quickstart/pom.xml index 801b6a8fd2fa..dd4bf1d68a1a 100644 --- a/archetypes/archetype-app-quickstart/pom.xml +++ b/archetypes/archetype-app-quickstart/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-lambda/pom.xml b/archetypes/archetype-lambda/pom.xml index fae3cb9cc124..ef2ea74c4c5f 100644 --- a/archetypes/archetype-lambda/pom.xml +++ b/archetypes/archetype-lambda/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 archetype-lambda diff --git a/archetypes/archetype-tools/pom.xml b/archetypes/archetype-tools/pom.xml index cf95e34e9d2f..ca41b0676686 100644 --- a/archetypes/archetype-tools/pom.xml +++ b/archetypes/archetype-tools/pom.xml @@ -20,7 +20,7 @@ archetypes software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix b/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix index 8574844e7283..846018dd9677 100755 --- a/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix +++ b/archetypes/archetype-tools/src/main/resources/map-service-to-client-prefix @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import json import os @@ -14,8 +14,9 @@ SERVICE_DIR = os.path.join( def load_all_service_modules(): service_mapping = {} - for f in [f for f in os.listdir(SERVICE_DIR) if os.path.isdir(os.path.join(SERVICE_DIR, f)) & os.path.exists(os.path.join(SERVICE_DIR, f, 'target'))]: - for s in [s for s in os.listdir(os.path.join(SERVICE_DIR, f, 'target', 'generated-sources/sdk/software/amazon/awssdk/services', f)) if s.endswith('AsyncClient.java') & s.startswith('Default')]: + # Use logical AND (and) instead of bitwise AND (&) for boolean operations + for f in [f for f in os.listdir(SERVICE_DIR) if 
os.path.isdir(os.path.join(SERVICE_DIR, f)) and os.path.exists(os.path.join(SERVICE_DIR, f, 'target'))]: + for s in [s for s in os.listdir(os.path.join(SERVICE_DIR, f, 'target', 'generated-sources/sdk/software/amazon/awssdk/services', f)) if s.endswith('AsyncClient.java') and s.startswith('Default')]: service_mapping[f] = find_client_prefix(s) return service_mapping diff --git a/archetypes/pom.xml b/archetypes/pom.xml index b122efe43b16..ef7e4795d710 100644 --- a/archetypes/pom.xml +++ b/archetypes/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 archetypes diff --git a/aws-sdk-java/pom.xml b/aws-sdk-java/pom.xml index aa5ed51232ed..71038e870e86 100644 --- a/aws-sdk-java/pom.xml +++ b/aws-sdk-java/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml aws-sdk-java @@ -1538,11 +1538,6 @@ Amazon AutoScaling, etc). licensemanagerusersubscriptions ${awsjavasdk.version} - - software.amazon.awssdk - privatenetworks - ${awsjavasdk.version} - software.amazon.awssdk supportapp @@ -2073,6 +2068,36 @@ Amazon AutoScaling, etc). 
ssmguiconnect ${awsjavasdk.version} + + software.amazon.awssdk + evs + ${awsjavasdk.version} + + + software.amazon.awssdk + mpa + ${awsjavasdk.version} + + + software.amazon.awssdk + aiops + ${awsjavasdk.version} + + + software.amazon.awssdk + workspacesinstances + ${awsjavasdk.version} + + + software.amazon.awssdk + keyspacesstreams + ${awsjavasdk.version} + + + software.amazon.awssdk + odb + ${awsjavasdk.version} + ${project.artifactId}-${project.version} diff --git a/bom-internal/pom.xml b/bom-internal/pom.xml index f33dc87e72ee..5db756cda2de 100644 --- a/bom-internal/pom.xml +++ b/bom-internal/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/bom/pom.xml b/bom/pom.xml index f02e527b0b6f..9219c536ef6e 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml bom @@ -1743,11 +1743,6 @@ licensemanagerusersubscriptions ${awsjavasdk.version} - - software.amazon.awssdk - privatenetworks - ${awsjavasdk.version} - software.amazon.awssdk supportapp @@ -2273,6 +2268,36 @@ ssmguiconnect ${awsjavasdk.version} + + software.amazon.awssdk + evs + ${awsjavasdk.version} + + + software.amazon.awssdk + mpa + ${awsjavasdk.version} + + + software.amazon.awssdk + aiops + ${awsjavasdk.version} + + + software.amazon.awssdk + workspacesinstances + ${awsjavasdk.version} + + + software.amazon.awssdk + keyspacesstreams + ${awsjavasdk.version} + + + software.amazon.awssdk + odb + ${awsjavasdk.version} + diff --git a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml index e123f641d076..7206acceb577 100644 --- a/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml +++ b/build-tools/src/main/resources/software/amazon/awssdk/spotbugs-suppressions.xml @@ -359,4 +359,8 @@ + + + + diff --git 
a/buildspecs/migration-test.yml b/buildspecs/migration-test.yml new file mode 100644 index 000000000000..504cf4c1d074 --- /dev/null +++ b/buildspecs/migration-test.yml @@ -0,0 +1,18 @@ +version: 0.2 + +phases: + install: + runtime-versions: + java: "$JAVA_RUNTIME" + + build: + commands: + - mvn clean install -pl :v2-migration-tests,:bom-internal -am -P quick $MAVEN_OPTIONS + - mvn install -pl :v2-migration-tests -P migration-tests -T2C $MAVEN_OPTIONS + finally: + - mkdir -p codebuild-test-reports + - find ./ -name 'TEST-*.xml' -type f -exec cp {} codebuild-test-reports/ \; +reports: + IntegTests: + files: + - 'codebuild-test-reports/**/*' diff --git a/buildspecs/release-javadoc.yml b/buildspecs/release-javadoc.yml index 3795526d25f1..ea262f492b12 100644 --- a/buildspecs/release-javadoc.yml +++ b/buildspecs/release-javadoc.yml @@ -13,12 +13,15 @@ phases: pre_build: commands: - DOC_PATH='s3://aws-java-sdk-javadoc/java/api' + - MODULES_TO_SKIP="protocol-tests,protocol-tests-core,codegen-generated-classes-test,sdk-benchmarks,s3-benchmarks,module-path-tests,test-utils,http-client-tests,tests-coverage-reporting,sdk-native-image-test,ruleset-testing-core,old-client-version-compatibility-test,crt-unavailable-tests,bundle-shading-tests,v2-migration,v2-migration-tests,architecture-tests,s3-tests" build: commands: - python ./scripts/doc_crosslinks/generate_cross_link_data.py --apiDefinitionsBasePath ./services/ --apiDefinitionsRelativeFilePath src/main/resources/codegen-resources/service-2.json --templateFilePath ./scripts/doc_crosslinks/crosslink_redirect.html --outputFilePath ./scripts/crosslink_redirect.html - mvn install -P quick -T1C - - mvn clean install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl 
'!:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:s3-benchmarks,!:module-path-tests,!:test-utils,!:http-client-tests,!:tests-coverage-reporting,!:sdk-native-image-test,!:ruleset-testing-core,!:old-client-version-compatibility-test,!:crt-unavailable-tests,!:bundle-shading-tests,!:v2-migration,!:v2-migration-tests,!:architecture-tests,!:s3-tests' + # Convert comma-separated list to space-separated list with !: prefix for each module + - MODULES_TO_SKIP_FORMATTED=$(echo $MODULES_TO_SKIP | sed 's/,/,!:/g' | sed 's/^/!:/') + - mvn clean install javadoc:aggregate -B -Ppublic-javadoc -Dcheckstyle.skip -Dspotbugs.skip -DskipTests -Ddoclint=none -pl $MODULES_TO_SKIP_FORMATTED - RELEASE_VERSION=`mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec` - - aws s3 sync target/site/apidocs/ $DOC_PATH/$RELEASE_VERSION/ --acl="public-read" diff --git a/buildspecs/release-to-maven.yml b/buildspecs/release-to-maven.yml index 451f977c9a56..fafb8fae03c6 100644 --- a/buildspecs/release-to-maven.yml +++ b/buildspecs/release-to-maven.yml @@ -16,6 +16,7 @@ phases: - SDK_SIGNING_GPG_PASSPHRASE_ARN="arn:aws:secretsmanager:us-east-1:103431983078:secret:sdk-signing-gpg-passphrase-A0H1Kq" - SONATYPE_PASSWORD_ARN="arn:aws:secretsmanager:us-east-1:103431983078:secret:sonatype-password-I2V6Y0" - SONATYPE_USERNAME_ARN="arn:aws:secretsmanager:us-east-1:103431983078:secret:sonatype-username-HphNZQ" + - MODULES_TO_SKIP="protocol-tests,protocol-tests-core,codegen-generated-classes-test,sdk-benchmarks,module-path-tests,tests-coverage-reporting,stability-tests,sdk-native-image-test,auth-tests,s3-benchmarks,region-testing,old-client-version-compatibility-test,crt-unavailable-tests,bundle-shading-tests,v2-migration-tests,architecture-tests,s3-tests" build: commands: @@ -37,7 +38,10 @@ phases: awk 'BEGIN { var=ENVIRON["SDK_SIGNING_GPG_KEYNAME"] } { gsub("\\$SDK_SIGNING_GPG_KEYNAME", var, $0); print }' > \ $SETTINGS_XML - mvn clean 
deploy -B -s $SETTINGS_XML -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl !:protocol-tests,!:protocol-tests-core,!:codegen-generated-classes-test,!:sdk-benchmarks,!:module-path-tests,!:tests-coverage-reporting,!:stability-tests,!:sdk-native-image-test,!:auth-tests,!:s3-benchmarks,!:region-testing,!:old-client-version-compatibility-test,!:crt-unavailable-tests,!:bundle-shading-tests,!:v2-migration-tests,!:architecture-tests,!:s3-tests -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true + # Convert comma-separated list to space-separated list with !: prefix for each module + MODULES_TO_SKIP_FORMATTED=$(echo $MODULES_TO_SKIP | sed 's/,/,!:/g' | sed 's/^/!:/') + + mvn clean deploy -B -s $SETTINGS_XML -Ppublishing -DperformRelease -Dspotbugs.skip -DskipTests -Dcheckstyle.skip -Djapicmp.skip -Ddoclint=none -pl $MODULES_TO_SKIP_FORMATTED -DautoReleaseAfterClose=true -DstagingProgressTimeoutMinutes=30 -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true else echo "This version was already released." 
fi diff --git a/buildspecs/resources/ci.cloudformation.yml b/buildspecs/resources/ci.cloudformation.yml index 02adcf3a24c4..740be418648c 100644 --- a/buildspecs/resources/ci.cloudformation.yml +++ b/buildspecs/resources/ci.cloudformation.yml @@ -57,6 +57,8 @@ Resources: - !Sub arn:aws:codebuild:${ AWS::Region }:${ AWS::AccountId }:project/aws-sdk-java-v2-native-image-test - !Sub arn:aws:codebuild:${ AWS::Region }:${ AWS::AccountId }:project/aws-sdk-java-v2-sonar - !Sub arn:aws:codebuild:${ AWS::Region }:${ AWS::AccountId }:project/aws-sdk-java-v2-endpoints-test + - !Sub arn:aws:codebuild:${ AWS::Region }:${ AWS::AccountId }:project/aws-sdk-java-v2-migration-test + - !Sub arn:aws:codebuild:${ AWS::Region }:${ AWS::AccountId }:project/aws-sdk-java-v2-s3-regression-tests - Effect: Allow Action: - logs:GetLogEvents @@ -69,6 +71,8 @@ Resources: - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/aws-sdk-java-v2-native-image-test:* - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/aws-sdk-java-v2-sonar:* - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/aws-sdk-java-v2-endpoints-test:* + - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/aws-sdk-java-v2-migration-test:* + - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/aws-sdk-java-v2-s3-regression-tests:* GithubOidc: Type: AWS::IAM::OIDCProvider diff --git a/buildspecs/s3-regression-tests.yml b/buildspecs/s3-regression-tests.yml new file mode 100644 index 000000000000..4b7102350ce0 --- /dev/null +++ b/buildspecs/s3-regression-tests.yml @@ -0,0 +1,14 @@ +version: 0.2 + +phases: + build: + commands: + - mvn clean install -P s3-regression-tests -pl :s3-tests -am -T1C -Dregression.test=$REGRESSION_TEST $MAVEN_OPTIONS + - echo $MAVEN_OPTIONS + finally: + - mkdir -p codebuild-test-reports + - find ./ -name 'TEST-*.xml' -type f -exec cp {} codebuild-test-reports/ \; +reports: + 
ChecksumsTests: + files: + - 'codebuild-test-reports/**/*' diff --git a/buildspecs/validate-brazil-config.yml b/buildspecs/validate-brazil-config.yml new file mode 100644 index 000000000000..481aa791f4ab --- /dev/null +++ b/buildspecs/validate-brazil-config.yml @@ -0,0 +1,14 @@ +version: 0.2 + +phases: + install: + runtime-versions: + java: "$JAVA_RUNTIME" + python: 3.13 + + build: + commands: + - mvn clean install -P quick -T0.4C + - mvn exec:exec -Dexec.executable=pwd -pl !:aws-sdk-java-pom,!:sdk-benchmarks,!:module-path-tests -q 2>&1 > modules.txt + - mvn dependency:list -DexcludeTransitive=true -DincludeScope=runtime 2>&1 > deps.txt + - scripts/validate-brazil-config modules.txt deps.txt \ No newline at end of file diff --git a/bundle-logging-bridge/pom.xml b/bundle-logging-bridge/pom.xml index 355425911a33..0decc070a4f7 100644 --- a/bundle-logging-bridge/pom.xml +++ b/bundle-logging-bridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bundle-logging-bridge jar diff --git a/bundle-sdk/pom.xml b/bundle-sdk/pom.xml index fd7e794886d8..a89e0182f5f2 100644 --- a/bundle-sdk/pom.xml +++ b/bundle-sdk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bundle-sdk jar diff --git a/bundle/pom.xml b/bundle/pom.xml index 1d7f5a43ed85..b095bb7b81c4 100644 --- a/bundle/pom.xml +++ b/bundle/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bundle jar diff --git a/codegen-lite-maven-plugin/pom.xml b/codegen-lite-maven-plugin/pom.xml index 56871379696e..c8cb7e848abe 100644 --- a/codegen-lite-maven-plugin/pom.xml +++ b/codegen-lite-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml codegen-lite-maven-plugin diff --git a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java 
b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java index e02358f3a67c..10422e931450 100644 --- a/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java +++ b/codegen-lite-maven-plugin/src/main/java/software/amazon/awssdk/codegen/lite/maven/plugin/RegionGenerationMojo.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.codegen.lite.regions.EndpointTagGenerator; import software.amazon.awssdk.codegen.lite.regions.PartitionMetadataGenerator; import software.amazon.awssdk.codegen.lite.regions.PartitionMetadataProviderGenerator; +import software.amazon.awssdk.codegen.lite.regions.PartitionsRegionsMetadataLoader; import software.amazon.awssdk.codegen.lite.regions.RegionGenerator; import software.amazon.awssdk.codegen.lite.regions.RegionMetadataGenerator; import software.amazon.awssdk.codegen.lite.regions.RegionMetadataLoader; @@ -36,6 +37,7 @@ import software.amazon.awssdk.codegen.lite.regions.ServiceMetadataGenerator; import software.amazon.awssdk.codegen.lite.regions.ServiceMetadataProviderGenerator; import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.StringUtils; /** @@ -59,19 +61,24 @@ public class RegionGenerationMojo extends AbstractMojo { "${basedir}/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json") private File endpoints; + @Parameter(property = "partitionsJson", defaultValue = + "${basedir}/../../codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource") + private File partitionsJson; + @Override public void execute() throws MojoExecutionException { Path baseSourcesDirectory = Paths.get(outputDirectory).resolve("generated-sources").resolve("sdk"); Path testsDirectory = 
Paths.get(outputDirectory).resolve("generated-test-sources").resolve("sdk-tests"); Partitions partitions = RegionMetadataLoader.build(endpoints); + PartitionsRegionsMetadata regionPartitions = PartitionsRegionsMetadataLoader.build(partitionsJson); generatePartitionMetadataClass(baseSourcesDirectory, partitions); - generateRegionClass(baseSourcesDirectory, partitions); + generateRegionClass(baseSourcesDirectory, regionPartitions); generateServiceMetadata(baseSourcesDirectory, partitions); - generateRegions(baseSourcesDirectory, partitions); + generateRegions(baseSourcesDirectory, regionPartitions); generatePartitionProvider(baseSourcesDirectory, partitions); - generateRegionProvider(baseSourcesDirectory, partitions); + generateRegionProvider(baseSourcesDirectory, regionPartitions); generateServiceProvider(baseSourcesDirectory, partitions); generateEndpointTags(baseSourcesDirectory, partitions); @@ -88,7 +95,7 @@ public void generatePartitionMetadataClass(Path baseSourcesDirectory, Partitions REGION_BASE)).generate()); } - public void generateRegionClass(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegionClass(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_BASE, ".", "/")); new CodeGenerator(sourcesDirectory.toString(), new RegionGenerator(partitions, REGION_BASE)).generate(); } @@ -105,7 +112,7 @@ public void generateServiceMetadata(Path baseSourcesDirectory, Partitions partit .generate()); } - public void generateRegions(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegions(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_METADATA_BASE, ".", "/")); partitions.getPartitions() .forEach(p -> p.getRegions().forEach((k, v) -> @@ -126,7 +133,7 @@ public void generatePartitionProvider(Path baseSourcesDirectory, Partitions 
part .generate(); } - public void generateRegionProvider(Path baseSourcesDirectory, Partitions partitions) { + public void generateRegionProvider(Path baseSourcesDirectory, PartitionsRegionsMetadata partitions) { Path sourcesDirectory = baseSourcesDirectory.resolve(StringUtils.replace(REGION_BASE, ".", "/")); new CodeGenerator(sourcesDirectory.toString(), new RegionMetadataProviderGenerator(partitions, REGION_METADATA_BASE, diff --git a/codegen-lite/pom.xml b/codegen-lite/pom.xml index 146093d0aebb..81beb659771e 100644 --- a/codegen-lite/pom.xml +++ b/codegen-lite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codegen-lite AWS Java SDK :: Code Generator Lite diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java new file mode 100644 index 000000000000..692ee7758081 --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/PartitionsRegionsMetadataLoader.java @@ -0,0 +1,47 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.lite.regions; + +import com.fasterxml.jackson.jr.ob.JSON; +import java.io.File; +import java.io.IOException; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; + +/** + * Loads and parses the partitions.json file into {@link PartitionsRegionsMetadata}. + */ +@SdkInternalApi +public final class PartitionsRegionsMetadataLoader { + + private PartitionsRegionsMetadataLoader() { + } + + public static PartitionsRegionsMetadata build(File path) { + return loadPartitionFromStream(path, path.toString()); + } + + private static PartitionsRegionsMetadata loadPartitionFromStream(File stream, String location) { + + try { + return JSON.std.with(JSON.Feature.USE_IS_GETTERS) + .beanFrom(PartitionsRegionsMetadata.class, stream); + + } catch (IOException | RuntimeException e) { + throw new RuntimeException("Error while loading partitions file from " + location, e); + } + } +} diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java index fa5467bc847f..27421f2b6b16 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerator.java @@ -38,16 +38,16 @@ import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; -import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.http.SdkHttpUtils; public class RegionGenerator implements PoetClass { - private final Partitions partitions; + private final 
PartitionsRegionsMetadata partitions; private final String basePackage; - public RegionGenerator(Partitions partitions, + public RegionGenerator(PartitionsRegionsMetadata partitions, String basePackage) { this.partitions = partitions; this.basePackage = basePackage; @@ -100,21 +100,15 @@ private void regions(TypeSpec.Builder builder) { .add("$T.unmodifiableList($T.asList(", Collections.class, Arrays.class); String regionsCodeBlock = regions.stream().map(r -> { + boolean isGlobal = r.contains("global"); builder.addField(FieldSpec.builder(className(), regionName(r)) .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S)", className(), r) + .initializer(isGlobal ? "$T.of($S, true)" : "$T.of($S)", className(), r) .build()); return regionName(r); }).collect(Collectors.joining(", ")); - addGlobalRegions(builder); - - regionsArray.add(regionsCodeBlock + ", ") - .add("AWS_GLOBAL, ") - .add("AWS_CN_GLOBAL, ") - .add("AWS_US_GOV_GLOBAL, ") - .add("AWS_ISO_GLOBAL, ") - .add("AWS_ISO_B_GLOBAL"); + regionsArray.add(regionsCodeBlock); regionsArray.add("))"); TypeName listOfRegions = ParameterizedTypeName.get(ClassName.get(List.class), className()); @@ -123,29 +117,6 @@ private void regions(TypeSpec.Builder builder) { .initializer(regionsArray.build()).build()); } - private void addGlobalRegions(TypeSpec.Builder builder) { - builder.addField(FieldSpec.builder(className(), "AWS_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_CN_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-cn-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_US_GOV_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-us-gov-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_ISO_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, 
true)", className(), "aws-iso-global") - .build()) - .addField(FieldSpec.builder(className(), "AWS_ISO_B_GLOBAL") - .addModifiers(PUBLIC, STATIC, FINAL) - .initializer("$T.of($S, true)", className(), "aws-iso-b-global") - .build()); - } - private String regionName(String region) { return region.replace("-", "_").toUpperCase(Locale.US); } diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java index 92011139d2c6..b71e7c2bb671 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataGenerator.java @@ -32,17 +32,17 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; import software.amazon.awssdk.codegen.lite.Utils; -import software.amazon.awssdk.codegen.lite.regions.model.Partition; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionRegionsMetadata; public class RegionMetadataGenerator implements PoetClass { - private final Partition partition; + private final PartitionRegionsMetadata partition; private final String region; private final String regionDescription; private final String basePackage; private final String regionBasePackage; - public RegionMetadataGenerator(Partition partition, + public RegionMetadataGenerator(PartitionRegionsMetadata partition, String region, String regionDescription, String basePackage, @@ -65,9 +65,9 @@ public TypeSpec poetClass() { .addModifiers(FINAL) .addSuperinterface(ClassName.get(regionBasePackage, "RegionMetadata")) .addField(staticFinalField("ID", region)) - .addField(staticFinalField("DOMAIN", partition.getDnsSuffix())) + .addField(staticFinalField("DOMAIN", partition.getOutputs().getDnsSuffix())) .addField(staticFinalField("DESCRIPTION", 
regionDescription)) - .addField(staticFinalField("PARTITION_ID", partition.getPartition())) + .addField(staticFinalField("PARTITION_ID", partition.getId())) .addMethod(getter("id", "ID")) .addMethod(getter("domain", "DOMAIN")) .addMethod(getter("description", "DESCRIPTION")) diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java index 0203bbbfb649..fdd16ab73eec 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/RegionMetadataProviderGenerator.java @@ -36,16 +36,16 @@ import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.codegen.lite.PoetClass; import software.amazon.awssdk.codegen.lite.Utils; -import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; import software.amazon.awssdk.utils.ImmutableMap; public class RegionMetadataProviderGenerator implements PoetClass { - private final Partitions partitions; + private final PartitionsRegionsMetadata partitions; private final String basePackage; private final String regionBasePackage; - public RegionMetadataProviderGenerator(Partitions partitions, + public RegionMetadataProviderGenerator(PartitionsRegionsMetadata partitions, String basePackage, String regionBasePackage) { this.partitions = partitions; @@ -79,7 +79,7 @@ public ClassName className() { return ClassName.get(regionBasePackage, "GeneratedRegionMetadataProvider"); } - private CodeBlock regions(Partitions partitions) { + private CodeBlock regions(PartitionsRegionsMetadata partitions) { CodeBlock.Builder builder = CodeBlock.builder().add("$T.builder()", ImmutableMap.class); partitions.getPartitions() diff --git 
a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java index e225d47bfd82..f8b030f64edb 100644 --- a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/Partition.java @@ -67,9 +67,9 @@ public Partition() { public Partition(@JsonProperty(value = "partition") String partition, @JsonProperty(value = "regions") Map - regions, + regions, @JsonProperty(value = "services") Map services) { + Service> services) { this.partition = Validate.paramNotNull(partition, "Partition"); this.regions = regions; this.services = services; @@ -186,4 +186,4 @@ private boolean hasServiceEndpoint(String endpoint) { } return false; } -} +} \ No newline at end of file diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java new file mode 100644 index 000000000000..f780e766f71e --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionRegionsMetadata.java @@ -0,0 +1,177 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.lite.regions.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * This class models a single partition from partitions.json. + */ +@SdkInternalApi +public final class PartitionRegionsMetadata { + private String id; + private PartitionOutputs outputs; + private String regionRegex; + private Map regions; + + public PartitionRegionsMetadata() { + } + + public PartitionRegionsMetadata(@JsonProperty(value = "id") String id, + @JsonProperty(value = "outputs") PartitionOutputs outputs, + @JsonProperty(value = "regionRegex") String regionRegex, + @JsonProperty(value = "regions") Map regions) { + this.id = id; + this.outputs = outputs; + this.regionRegex = regionRegex; + this.regions = regions; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public PartitionOutputs getOutputs() { + return outputs; + } + + public void setOutputs(PartitionOutputs outputs) { + this.outputs = outputs; + } + + public String getRegionRegex() { + return regionRegex; + } + + public void setRegionRegex(String regionRegex) { + this.regionRegex = regionRegex; + } + + public Map getRegions() { + return regions; + } + + public void setRegions(Map regions) { + this.regions = regions; + } + + /** + * This class models the outputs field of a partition in partitions.json. 
+ */ + @SdkInternalApi + public static final class PartitionOutputs { + private String dnsSuffix; + private String dualStackDnsSuffix; + private String implicitGlobalRegion; + private String name; + private boolean supportsDualStack; + private boolean supportsFIPS; + + public PartitionOutputs() { + } + + public PartitionOutputs(@JsonProperty(value = "dnsSuffix") String dnsSuffix, + @JsonProperty(value = "dualStackDnsSuffix") String dualStackDnsSuffix, + @JsonProperty(value = "implicitGlobalRegion") String implicitGlobalRegion, + @JsonProperty(value = "name") String name, + @JsonProperty(value = "supportsDualStack") boolean supportsDualStack, + @JsonProperty(value = "supportsFIPS") boolean supportsFIPS) { + this.dnsSuffix = dnsSuffix; + this.dualStackDnsSuffix = dualStackDnsSuffix; + this.implicitGlobalRegion = implicitGlobalRegion; + this.name = name; + this.supportsDualStack = supportsDualStack; + this.supportsFIPS = supportsFIPS; + } + + public String getDnsSuffix() { + return dnsSuffix; + } + + public void setDnsSuffix(String dnsSuffix) { + this.dnsSuffix = dnsSuffix; + } + + public String getDualStackDnsSuffix() { + return dualStackDnsSuffix; + } + + public void setDualStackDnsSuffix(String dualStackDnsSuffix) { + this.dualStackDnsSuffix = dualStackDnsSuffix; + } + + public String getImplicitGlobalRegion() { + return implicitGlobalRegion; + } + + public void setImplicitGlobalRegion(String implicitGlobalRegion) { + this.implicitGlobalRegion = implicitGlobalRegion; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isSupportsDualStack() { + return supportsDualStack; + } + + public void setSupportsDualStack(boolean supportsDualStack) { + this.supportsDualStack = supportsDualStack; + } + + public boolean isSupportsFIPS() { + return supportsFIPS; + } + + public void setSupportsFIPS(boolean supportsFIPS) { + this.supportsFIPS = supportsFIPS; + } + } + + /** + * This class models a 
region in partitions.json. + */ + @SdkInternalApi + public static final class RegionMetadata { + private String description; + + public RegionMetadata() { + } + + public RegionMetadata(@JsonProperty(value = "description") String description) { + this.description = description; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + } +} diff --git a/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java new file mode 100644 index 000000000000..7c5092e41c66 --- /dev/null +++ b/codegen-lite/src/main/java/software/amazon/awssdk/codegen/lite/regions/model/PartitionsRegionsMetadata.java @@ -0,0 +1,55 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.lite.regions.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.Validate; + +/** + * This class models the AWS partitions metadata from partitions.json. 
+ */ +@SdkInternalApi +public final class PartitionsRegionsMetadata { + private List partitions; + private String version; + + public PartitionsRegionsMetadata() { + } + + public PartitionsRegionsMetadata(@JsonProperty(value = "partitions") List partitions, + @JsonProperty(value = "version") String version) { + this.partitions = Validate.paramNotNull(partitions, "partitions"); + this.version = Validate.paramNotNull(version, "version"); + } + + public List getPartitions() { + return partitions; + } + + public void setPartitions(List partitions) { + this.partitions = partitions; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } +} diff --git a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java index 0240958a17d9..2ace744a0392 100644 --- a/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java +++ b/codegen-lite/src/test/java/software/amazon/awssdk/codegen/lite/regions/RegionGenerationTest.java @@ -21,35 +21,42 @@ import java.nio.file.Paths; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import software.amazon.awssdk.codegen.lite.regions.model.Partition; import software.amazon.awssdk.codegen.lite.regions.model.Partitions; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionsRegionsMetadata; +import software.amazon.awssdk.codegen.lite.regions.model.PartitionRegionsMetadata; public class RegionGenerationTest { private static final String ENDPOINTS = "/software/amazon/awssdk/codegen/lite/test-endpoints.json"; + private static final String PARTITIONS = "/software/amazon/awssdk/codegen/lite/test-partitions.json.resource"; private static final String SERVICE_METADATA_BASE = "software.amazon.awssdk.regions.servicemetadata"; private static final String 
REGION_METADATA_BASE = "software.amazon.awssdk.regions.regionmetadata"; private static final String PARTITION_METADATA_BASE = "software.amazon.awssdk.regions.partitionmetadata"; private static final String REGION_BASE = "software.amazon.awssdk.regions"; private File endpoints; + private File partitionsFile; private Partitions partitions; + private PartitionsRegionsMetadata partitionsRegions; + @BeforeEach public void before() throws Exception { this.endpoints = Paths.get(getClass().getResource(ENDPOINTS).toURI()).toFile(); + this.partitionsFile = Paths.get(getClass().getResource(PARTITIONS).toURI()).toFile(); this.partitions = RegionMetadataLoader.build(endpoints); + this.partitionsRegions = PartitionsRegionsMetadataLoader.build(partitionsFile); } @Test public void regionClass() { - RegionGenerator regions = new RegionGenerator(partitions, REGION_BASE); + RegionGenerator regions = new RegionGenerator(partitionsRegions, REGION_BASE); assertThat(regions, generatesTo("regions.java")); } @Test public void regionMetadataClass() { - Partition partition = partitions.getPartitions().get(0); + PartitionRegionsMetadata partition = partitionsRegions.getPartitions().get(0); RegionMetadataGenerator metadataGenerator = new RegionMetadataGenerator(partition, "us-east-1", "US East (N. 
Virginia)", @@ -61,7 +68,7 @@ public void regionMetadataClass() { @Test public void regionMetadataProviderClass() { - RegionMetadataProviderGenerator providerGenerator = new RegionMetadataProviderGenerator(partitions, + RegionMetadataProviderGenerator providerGenerator = new RegionMetadataProviderGenerator(partitionsRegions, REGION_METADATA_BASE, REGION_BASE); assertThat(providerGenerator, generatesTo("region-metadata-provider.java")); diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java index 9380df896357..798644de1fef 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/region-metadata-provider.java @@ -9,18 +9,37 @@ import software.amazon.awssdk.regions.regionmetadata.ApNortheast2; import software.amazon.awssdk.regions.regionmetadata.ApNortheast3; import software.amazon.awssdk.regions.regionmetadata.ApSouth1; +import software.amazon.awssdk.regions.regionmetadata.ApSouth2; import software.amazon.awssdk.regions.regionmetadata.ApSoutheast1; import software.amazon.awssdk.regions.regionmetadata.ApSoutheast2; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast3; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast4; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast5; +import software.amazon.awssdk.regions.regionmetadata.ApSoutheast7; +import software.amazon.awssdk.regions.regionmetadata.AwsCnGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoBGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoFGlobal; +import software.amazon.awssdk.regions.regionmetadata.AwsIsoGlobal; +import 
software.amazon.awssdk.regions.regionmetadata.AwsUsGovGlobal; import software.amazon.awssdk.regions.regionmetadata.CaCentral1; +import software.amazon.awssdk.regions.regionmetadata.CaWest1; import software.amazon.awssdk.regions.regionmetadata.CnNorth1; import software.amazon.awssdk.regions.regionmetadata.CnNorthwest1; import software.amazon.awssdk.regions.regionmetadata.EuCentral1; +import software.amazon.awssdk.regions.regionmetadata.EuCentral2; +import software.amazon.awssdk.regions.regionmetadata.EuIsoeWest1; import software.amazon.awssdk.regions.regionmetadata.EuNorth1; import software.amazon.awssdk.regions.regionmetadata.EuSouth1; +import software.amazon.awssdk.regions.regionmetadata.EuSouth2; import software.amazon.awssdk.regions.regionmetadata.EuWest1; import software.amazon.awssdk.regions.regionmetadata.EuWest2; import software.amazon.awssdk.regions.regionmetadata.EuWest3; +import software.amazon.awssdk.regions.regionmetadata.EuscDeEast1; +import software.amazon.awssdk.regions.regionmetadata.IlCentral1; +import software.amazon.awssdk.regions.regionmetadata.MeCentral1; import software.amazon.awssdk.regions.regionmetadata.MeSouth1; +import software.amazon.awssdk.regions.regionmetadata.MxCentral1; import software.amazon.awssdk.regions.regionmetadata.SaEast1; import software.amazon.awssdk.regions.regionmetadata.UsEast1; import software.amazon.awssdk.regions.regionmetadata.UsEast2; @@ -29,6 +48,8 @@ import software.amazon.awssdk.regions.regionmetadata.UsIsoEast1; import software.amazon.awssdk.regions.regionmetadata.UsIsoWest1; import software.amazon.awssdk.regions.regionmetadata.UsIsobEast1; +import software.amazon.awssdk.regions.regionmetadata.UsIsofEast1; +import software.amazon.awssdk.regions.regionmetadata.UsIsofSouth1; import software.amazon.awssdk.regions.regionmetadata.UsWest1; import software.amazon.awssdk.regions.regionmetadata.UsWest2; import software.amazon.awssdk.utils.ImmutableMap; @@ -40,15 +61,26 @@ public final class 
GeneratedRegionMetadataProvider implements RegionMetadataProv .put(Region.AF_SOUTH_1, new AfSouth1()).put(Region.AP_EAST_1, new ApEast1()) .put(Region.AP_NORTHEAST_1, new ApNortheast1()).put(Region.AP_NORTHEAST_2, new ApNortheast2()) .put(Region.AP_NORTHEAST_3, new ApNortheast3()).put(Region.AP_SOUTH_1, new ApSouth1()) - .put(Region.AP_SOUTHEAST_1, new ApSoutheast1()).put(Region.AP_SOUTHEAST_2, new ApSoutheast2()) - .put(Region.CA_CENTRAL_1, new CaCentral1()).put(Region.EU_CENTRAL_1, new EuCentral1()) - .put(Region.EU_NORTH_1, new EuNorth1()).put(Region.EU_SOUTH_1, new EuSouth1()).put(Region.EU_WEST_1, new EuWest1()) - .put(Region.EU_WEST_2, new EuWest2()).put(Region.EU_WEST_3, new EuWest3()).put(Region.ME_SOUTH_1, new MeSouth1()) + .put(Region.AP_SOUTH_2, new ApSouth2()).put(Region.AP_SOUTHEAST_1, new ApSoutheast1()) + .put(Region.AP_SOUTHEAST_2, new ApSoutheast2()).put(Region.AP_SOUTHEAST_3, new ApSoutheast3()) + .put(Region.AP_SOUTHEAST_4, new ApSoutheast4()).put(Region.AP_SOUTHEAST_5, new ApSoutheast5()) + .put(Region.AP_SOUTHEAST_7, new ApSoutheast7()).put(Region.AWS_GLOBAL, new AwsGlobal()) + .put(Region.CA_CENTRAL_1, new CaCentral1()).put(Region.CA_WEST_1, new CaWest1()) + .put(Region.EU_CENTRAL_1, new EuCentral1()).put(Region.EU_CENTRAL_2, new EuCentral2()) + .put(Region.EU_NORTH_1, new EuNorth1()).put(Region.EU_SOUTH_1, new EuSouth1()).put(Region.EU_SOUTH_2, new EuSouth2()) + .put(Region.EU_WEST_1, new EuWest1()).put(Region.EU_WEST_2, new EuWest2()).put(Region.EU_WEST_3, new EuWest3()) + .put(Region.IL_CENTRAL_1, new IlCentral1()).put(Region.ME_CENTRAL_1, new MeCentral1()) + .put(Region.ME_SOUTH_1, new MeSouth1()).put(Region.MX_CENTRAL_1, new MxCentral1()) .put(Region.SA_EAST_1, new SaEast1()).put(Region.US_EAST_1, new UsEast1()).put(Region.US_EAST_2, new UsEast2()) - .put(Region.US_WEST_1, new UsWest1()).put(Region.US_WEST_2, new UsWest2()).put(Region.CN_NORTH_1, new CnNorth1()) - .put(Region.CN_NORTHWEST_1, new CnNorthwest1()).put(Region.US_GOV_EAST_1, 
new UsGovEast1()) - .put(Region.US_GOV_WEST_1, new UsGovWest1()).put(Region.US_ISO_EAST_1, new UsIsoEast1()) - .put(Region.US_ISO_WEST_1, new UsIsoWest1()).put(Region.US_ISOB_EAST_1, new UsIsobEast1()).build(); + .put(Region.US_WEST_1, new UsWest1()).put(Region.US_WEST_2, new UsWest2()) + .put(Region.AWS_CN_GLOBAL, new AwsCnGlobal()).put(Region.CN_NORTH_1, new CnNorth1()) + .put(Region.CN_NORTHWEST_1, new CnNorthwest1()).put(Region.AWS_US_GOV_GLOBAL, new AwsUsGovGlobal()) + .put(Region.US_GOV_EAST_1, new UsGovEast1()).put(Region.US_GOV_WEST_1, new UsGovWest1()) + .put(Region.AWS_ISO_GLOBAL, new AwsIsoGlobal()).put(Region.US_ISO_EAST_1, new UsIsoEast1()) + .put(Region.US_ISO_WEST_1, new UsIsoWest1()).put(Region.AWS_ISO_B_GLOBAL, new AwsIsoBGlobal()) + .put(Region.US_ISOB_EAST_1, new UsIsobEast1()).put(Region.EU_ISOE_WEST_1, new EuIsoeWest1()) + .put(Region.AWS_ISO_F_GLOBAL, new AwsIsoFGlobal()).put(Region.US_ISOF_EAST_1, new UsIsofEast1()) + .put(Region.US_ISOF_SOUTH_1, new UsIsofSouth1()).put(Region.EUSC_DE_EAST_1, new EuscDeEast1()).build(); public RegionMetadata regionMetadata(Region region) { return REGION_METADATA.get(region); diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java index e007c4fbaf79..9935ffd3f4d9 100644 --- a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java +++ b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/regions/regions.java @@ -36,17 +36,11 @@ @SdkPublicApi @Generated("software.amazon.awssdk:codegen") public final class Region { - public static final Region AP_SOUTH_1 = Region.of("ap-south-1"); - - public static final Region EU_SOUTH_1 = Region.of("eu-south-1"); - - public static final Region US_GOV_EAST_1 = Region.of("us-gov-east-1"); + public static final Region ME_CENTRAL_1 = Region.of("me-central-1"); - public static final Region 
CA_CENTRAL_1 = Region.of("ca-central-1"); + public static final Region AWS_CN_GLOBAL = Region.of("aws-cn-global", true); - public static final Region EU_CENTRAL_1 = Region.of("eu-central-1"); - - public static final Region US_ISO_WEST_1 = Region.of("us-iso-west-1"); + public static final Region US_ISOF_SOUTH_1 = Region.of("us-isof-south-1"); public static final Region US_WEST_1 = Region.of("us-west-1"); @@ -54,14 +48,6 @@ public final class Region { public static final Region AF_SOUTH_1 = Region.of("af-south-1"); - public static final Region EU_NORTH_1 = Region.of("eu-north-1"); - - public static final Region EU_WEST_3 = Region.of("eu-west-3"); - - public static final Region EU_WEST_2 = Region.of("eu-west-2"); - - public static final Region EU_WEST_1 = Region.of("eu-west-1"); - public static final Region AP_NORTHEAST_3 = Region.of("ap-northeast-3"); public static final Region AP_NORTHEAST_2 = Region.of("ap-northeast-2"); @@ -72,41 +58,89 @@ public final class Region { public static final Region SA_EAST_1 = Region.of("sa-east-1"); - public static final Region AP_EAST_1 = Region.of("ap-east-1"); - public static final Region CN_NORTH_1 = Region.of("cn-north-1"); - public static final Region US_GOV_WEST_1 = Region.of("us-gov-west-1"); - public static final Region AP_SOUTHEAST_1 = Region.of("ap-southeast-1"); public static final Region AP_SOUTHEAST_2 = Region.of("ap-southeast-2"); - public static final Region US_ISO_EAST_1 = Region.of("us-iso-east-1"); + public static final Region AP_SOUTHEAST_3 = Region.of("ap-southeast-3"); + + public static final Region AP_SOUTHEAST_4 = Region.of("ap-southeast-4"); + + public static final Region AP_SOUTHEAST_5 = Region.of("ap-southeast-5"); public static final Region US_EAST_1 = Region.of("us-east-1"); public static final Region US_EAST_2 = Region.of("us-east-2"); + public static final Region AP_SOUTHEAST_7 = Region.of("ap-southeast-7"); + public static final Region CN_NORTHWEST_1 = Region.of("cn-northwest-1"); - public static final 
Region US_ISOB_EAST_1 = Region.of("us-isob-east-1"); + public static final Region AP_SOUTH_2 = Region.of("ap-south-2"); + + public static final Region AP_SOUTH_1 = Region.of("ap-south-1"); + + public static final Region EU_SOUTH_1 = Region.of("eu-south-1"); + + public static final Region EU_SOUTH_2 = Region.of("eu-south-2"); + + public static final Region US_GOV_EAST_1 = Region.of("us-gov-east-1"); + + public static final Region IL_CENTRAL_1 = Region.of("il-central-1"); + + public static final Region CA_CENTRAL_1 = Region.of("ca-central-1"); + + public static final Region MX_CENTRAL_1 = Region.of("mx-central-1"); + + public static final Region EU_CENTRAL_1 = Region.of("eu-central-1"); + + public static final Region US_ISO_WEST_1 = Region.of("us-iso-west-1"); + + public static final Region EUSC_DE_EAST_1 = Region.of("eusc-de-east-1"); + + public static final Region EU_CENTRAL_2 = Region.of("eu-central-2"); + + public static final Region EU_ISOE_WEST_1 = Region.of("eu-isoe-west-1"); public static final Region AWS_GLOBAL = Region.of("aws-global", true); - public static final Region AWS_CN_GLOBAL = Region.of("aws-cn-global", true); + public static final Region EU_NORTH_1 = Region.of("eu-north-1"); - public static final Region AWS_US_GOV_GLOBAL = Region.of("aws-us-gov-global", true); + public static final Region EU_WEST_3 = Region.of("eu-west-3"); + + public static final Region EU_WEST_2 = Region.of("eu-west-2"); + + public static final Region EU_WEST_1 = Region.of("eu-west-1"); public static final Region AWS_ISO_GLOBAL = Region.of("aws-iso-global", true); + public static final Region AP_EAST_1 = Region.of("ap-east-1"); + + public static final Region CA_WEST_1 = Region.of("ca-west-1"); + + public static final Region US_GOV_WEST_1 = Region.of("us-gov-west-1"); + + public static final Region US_ISO_EAST_1 = Region.of("us-iso-east-1"); + public static final Region AWS_ISO_B_GLOBAL = Region.of("aws-iso-b-global", true); - private static final List REGIONS = 
Collections.unmodifiableList(Arrays.asList(AP_SOUTH_1, EU_SOUTH_1, US_GOV_EAST_1, - CA_CENTRAL_1, EU_CENTRAL_1, US_ISO_WEST_1, US_WEST_1, US_WEST_2, AF_SOUTH_1, EU_NORTH_1, EU_WEST_3, EU_WEST_2, - EU_WEST_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, ME_SOUTH_1, SA_EAST_1, AP_EAST_1, CN_NORTH_1, - US_GOV_WEST_1, AP_SOUTHEAST_1, AP_SOUTHEAST_2, US_ISO_EAST_1, US_EAST_1, US_EAST_2, CN_NORTHWEST_1, US_ISOB_EAST_1, - AWS_GLOBAL, AWS_CN_GLOBAL, AWS_US_GOV_GLOBAL, AWS_ISO_GLOBAL, AWS_ISO_B_GLOBAL)); + public static final Region AWS_ISO_F_GLOBAL = Region.of("aws-iso-f-global", true); + + public static final Region AWS_US_GOV_GLOBAL = Region.of("aws-us-gov-global", true); + + public static final Region US_ISOB_EAST_1 = Region.of("us-isob-east-1"); + + public static final Region US_ISOF_EAST_1 = Region.of("us-isof-east-1"); + + private static final List REGIONS = Collections.unmodifiableList(Arrays.asList(ME_CENTRAL_1, AWS_CN_GLOBAL, + US_ISOF_SOUTH_1, US_WEST_1, US_WEST_2, AF_SOUTH_1, AP_NORTHEAST_3, AP_NORTHEAST_2, AP_NORTHEAST_1, ME_SOUTH_1, + SA_EAST_1, CN_NORTH_1, AP_SOUTHEAST_1, AP_SOUTHEAST_2, AP_SOUTHEAST_3, AP_SOUTHEAST_4, AP_SOUTHEAST_5, US_EAST_1, + US_EAST_2, AP_SOUTHEAST_7, CN_NORTHWEST_1, AP_SOUTH_2, AP_SOUTH_1, EU_SOUTH_1, EU_SOUTH_2, US_GOV_EAST_1, + IL_CENTRAL_1, CA_CENTRAL_1, MX_CENTRAL_1, EU_CENTRAL_1, US_ISO_WEST_1, EUSC_DE_EAST_1, EU_CENTRAL_2, EU_ISOE_WEST_1, + AWS_GLOBAL, EU_NORTH_1, EU_WEST_3, EU_WEST_2, EU_WEST_1, AWS_ISO_GLOBAL, AP_EAST_1, CA_WEST_1, US_GOV_WEST_1, + US_ISO_EAST_1, AWS_ISO_B_GLOBAL, AWS_ISO_F_GLOBAL, AWS_US_GOV_GLOBAL, US_ISOB_EAST_1, US_ISOF_EAST_1)); private final boolean isGlobalRegion; diff --git a/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource new file mode 100644 index 000000000000..3d7e9530924e --- /dev/null +++ 
b/codegen-lite/src/test/resources/software/amazon/awssdk/codegen/lite/test-partitions.json.resource @@ -0,0 +1,255 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-east-1", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "ap-southeast-5" : { + "description" : "Asia Pacific (Malaysia)" + }, + "ap-southeast-7" : { + "description" : "Asia Pacific (Thailand)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "ca-west-1" : { + "description" : "Canada West (Calgary)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + 
"description" : "Europe (Paris)" + }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + 
"description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + }, { + "id" : "aws-iso-e", + "outputs" : { + "dnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", + "name" : "aws-iso-e", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-f-global" : { + "description" : "AWS ISOF global region" + }, + "us-isof-east-1" : { + "description" : "US ISOF EAST" + }, + "us-isof-south-1" : { + "description" : "US ISOF SOUTH" + } + } + }, { + "id" : "aws-eusc", + "outputs" : { + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "amazonaws.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/codegen-maven-plugin/pom.xml 
b/codegen-maven-plugin/pom.xml index a9fcbe256f50..64fa80df5b50 100644 --- a/codegen-maven-plugin/pom.xml +++ b/codegen-maven-plugin/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml codegen-maven-plugin @@ -57,6 +57,11 @@ software.amazon.awssdk ${awsjavasdk.version} + + utils + software.amazon.awssdk + ${awsjavasdk.version} + org.junit.jupiter junit-jupiter diff --git a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java index 4ce4e7be116b..3d17b4d84bb2 100644 --- a/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java +++ b/codegen-maven-plugin/src/main/java/software/amazon/awssdk/codegen/maven/plugin/GenerationMojo.java @@ -17,11 +17,17 @@ import java.io.File; import java.io.IOException; +import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.attribute.BasicFileAttributes; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.maven.plugin.AbstractMojo; import org.apache.maven.plugin.MojoExecutionException; @@ -30,21 +36,26 @@ import org.apache.maven.project.MavenProject; import software.amazon.awssdk.codegen.C2jModels; import software.amazon.awssdk.codegen.CodeGenerator; +import software.amazon.awssdk.codegen.IntermediateModelBuilder; +import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import 
software.amazon.awssdk.codegen.model.rules.endpoints.EndpointTestSuiteModel; import software.amazon.awssdk.codegen.model.service.EndpointRuleSetModel; import software.amazon.awssdk.codegen.model.service.Paginators; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Waiters; import software.amazon.awssdk.codegen.utils.ModelLoaderUtils; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ModelValidationReport; +import software.amazon.awssdk.utils.StringUtils; /** * The Maven mojo to generate Java client code using software.amazon.awssdk:codegen module. */ @Mojo(name = "generate") public class GenerationMojo extends AbstractMojo { - private static final String MODEL_FILE = "service-2.json"; private static final String CUSTOMIZATION_CONFIG_FILE = "customization.config"; private static final String WAITERS_FILE = "waiters-2.json"; @@ -62,6 +73,8 @@ public class GenerationMojo extends AbstractMojo { @Parameter(property = "writeIntermediateModel", defaultValue = "false") private boolean writeIntermediateModel; + @Parameter(property = "writeValidationReport", defaultValue = "false") + private boolean writeValidationReport; @Parameter(defaultValue = "${project}", readonly = true) private MavenProject project; @@ -76,22 +89,72 @@ public void execute() throws MojoExecutionException { this.resourcesDirectory = Paths.get(outputDirectory).resolve("generated-resources").resolve("sdk-resources"); this.testsDirectory = Paths.get(outputDirectory).resolve("generated-test-sources").resolve("sdk-tests"); - findModelRoots().forEach(p -> { - Path modelRootPath = p.modelRoot; - getLog().info("Loading from: " + modelRootPath.toString()); - generateCode(C2jModels.builder() - .customizationConfig(p.customizationConfig) - .serviceModel(loadServiceModel(modelRootPath)) - .waitersModel(loadWaiterModel(modelRootPath)) - 
.paginatorsModel(loadPaginatorModel(modelRootPath)) - .endpointRuleSetModel(loadEndpointRuleSetModel(modelRootPath)) - .endpointTestSuiteModel(loadEndpointTestSuiteModel(modelRootPath)) - .build()); + List generationParams; + + try { + generationParams = initGenerationParams(); + } catch (ModelInvalidException e) { + if (writeValidationReport) { + ModelValidationReport report = new ModelValidationReport(); + report.setValidationEntries(e.validationEntries()); + emitValidationReport(report); + } + throw e; + } + + Map serviceNameToModelMap = new HashMap<>(); + + generationParams.forEach( + params -> { + IntermediateModel model = params.intermediateModel; + String lowercaseServiceName = StringUtils.lowerCase(model.getMetadata().getServiceName()); + IntermediateModel previous = serviceNameToModelMap.put(lowercaseServiceName, model); + if (previous != null) { + String warning = String.format("Multiple service models found with service name %s. Model validation " + + "will likely be incorrect", lowercaseServiceName); + getLog().warn(warning); + } + }); + + // Update each param with the intermediate model it shares models with, if any + generationParams.forEach(params -> { + CustomizationConfig customizationConfig = params.intermediateModel.getCustomizationConfig(); + + if (customizationConfig.getShareModelConfig() != null) { + String shareModelWithName = customizationConfig.getShareModelConfig().getShareModelWith(); + params.withShareModelsTarget(serviceNameToModelMap.get(shareModelWithName)); + } }); + + generationParams.forEach(this::generateCode); + project.addCompileSourceRoot(sourcesDirectory.toFile().getAbsolutePath()); project.addTestCompileSourceRoot(testsDirectory.toFile().getAbsolutePath()); } + private List initGenerationParams() throws MojoExecutionException { + List modelRoots = findModelRoots().collect(Collectors.toList()); + + return modelRoots.stream().map(r -> { + Path modelRootPath = r.modelRoot; + getLog().info("Loading from: " + 
modelRootPath.toString()); + C2jModels c2jModels = C2jModels.builder() + .customizationConfig(r.customizationConfig) + .serviceModel(loadServiceModel(modelRootPath)) + .waitersModel(loadWaiterModel(modelRootPath)) + .paginatorsModel(loadPaginatorModel(modelRootPath)) + .endpointRuleSetModel(loadEndpointRuleSetModel(modelRootPath)) + .endpointTestSuiteModel(loadEndpointTestSuiteModel(modelRootPath)) + .build(); + String intermediateModelFileNamePrefix = intermediateModelFileNamePrefix(c2jModels); + IntermediateModel intermediateModel = new IntermediateModelBuilder(c2jModels).build(); + return new GenerationParams().withIntermediateModel(intermediateModel) + .withIntermediateModelFileNamePrefix(intermediateModelFileNamePrefix); + }).collect(Collectors.toList()); + } + + + private Stream findModelRoots() throws MojoExecutionException { try { return Files.find(codeGenResources.toPath(), 10, this::isModelFile) @@ -111,13 +174,15 @@ private boolean isModelFile(Path p, BasicFileAttributes a) { return p.toString().endsWith(MODEL_FILE); } - private void generateCode(C2jModels models) { + private void generateCode(GenerationParams params) { CodeGenerator.builder() - .models(models) + .intermediateModel(params.intermediateModel) + .shareModelsTarget(params.shareModelsTarget) .sourcesDirectory(sourcesDirectory.toFile().getAbsolutePath()) .resourcesDirectory(resourcesDirectory.toFile().getAbsolutePath()) .testsDirectory(testsDirectory.toFile().getAbsolutePath()) - .intermediateModelFileNamePrefix(intermediateModelFileNamePrefix(models)) + .intermediateModelFileNamePrefix(params.intermediateModelFileNamePrefix) + .emitValidationReport(writeValidationReport) .build() .execute(); } @@ -169,6 +234,17 @@ private Optional loadOptionalModel(Class clzz, Path location) { return ModelLoaderUtils.loadOptionalModel(clzz, location.toFile()); } + private void emitValidationReport(ModelValidationReport report) { + Path modelsDir = sourcesDirectory.resolve("models"); + try (Writer writer = 
Files.newBufferedWriter(modelsDir.resolve("validation-report.json"), + StandardCharsets.UTF_8);) { + + Jackson.writeWithObjectMapper(report, writer); + } catch (IOException e) { + getLog().warn("Failed to write validation report to " + modelsDir, e); + } + } + private static class ModelRoot { private final Path modelRoot; private final CustomizationConfig customizationConfig; @@ -178,4 +254,25 @@ private ModelRoot(Path modelRoot, CustomizationConfig customizationConfig) { this.customizationConfig = customizationConfig; } } + + private static class GenerationParams { + private IntermediateModel intermediateModel; + private IntermediateModel shareModelsTarget; + private String intermediateModelFileNamePrefix; + + public GenerationParams withIntermediateModel(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; + return this; + } + + public GenerationParams withShareModelsTarget(IntermediateModel shareModelsTarget) { + this.shareModelsTarget = shareModelsTarget; + return this; + } + + public GenerationParams withIntermediateModelFileNamePrefix(String intermediateModelFileNamePrefix) { + this.intermediateModelFileNamePrefix = intermediateModelFileNamePrefix; + return this; + } + } } diff --git a/codegen/pom.xml b/codegen/pom.xml index b8419b94d163..b5b94a594ad0 100644 --- a/codegen/pom.xml +++ b/codegen/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codegen AWS Java SDK :: Code Generator @@ -239,5 +239,10 @@ mockito-core compile + + nl.jqno.equalsverifier + equalsverifier + test + diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java index 56ed58996ac8..41d1f32693f8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddMetadata.java @@ -28,6 +28,7 @@ import 
software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.naming.DefaultNamingStrategy; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringUtils; @@ -70,7 +71,7 @@ public static Metadata constructMetadata(ServiceModel serviceModel, .withBaseExceptionName(String.format(Constant.BASE_EXCEPTION_NAME_PATTERN, serviceName)) .withBaseRequestName(String.format(Constant.BASE_REQUEST_NAME_PATTERN, serviceName)) .withBaseResponseName(String.format(Constant.BASE_RESPONSE_NAME_PATTERN, serviceName)) - .withProtocol(Protocol.fromValue(serviceMetadata.getProtocol())) + .withProtocol(Protocol.fromValue(ProtocolUtils.resolveProtocol(serviceMetadata))) .withJsonVersion(serviceMetadata.getJsonVersion()) .withEndpointPrefix(serviceMetadata.getEndpointPrefix()) .withSigningName(serviceMetadata.getSigningName()) diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java index 1f247fe61b9f..6953ce6b066f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddOperations.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * Constructs the operation model for every operation defined by the service. 
@@ -164,7 +165,7 @@ public Map constructOperations() { OperationModel operationModel = new OperationModel(); operationModel.setOperationName(operationName); - operationModel.setServiceProtocol(serviceModel.getMetadata().getProtocol()); + operationModel.setServiceProtocol(ProtocolUtils.resolveProtocol(serviceModel.getMetadata())); operationModel.setDeprecated(op.isDeprecated()); operationModel.setDeprecatedMessage(op.getDeprecatedMessage()); operationModel.setDocumentation(op.getDocumentation()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java index 4e7811bebdda..46b15ae7dbbd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/AddShapes.java @@ -21,6 +21,7 @@ import static software.amazon.awssdk.codegen.internal.Utils.isMapShape; import static software.amazon.awssdk.codegen.internal.Utils.isScalar; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; @@ -37,9 +38,15 @@ import software.amazon.awssdk.codegen.model.intermediate.VariableModel; import software.amazon.awssdk.codegen.model.service.Location; import software.amazon.awssdk.codegen.model.service.Member; +import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ValidationEntry; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; +import software.amazon.awssdk.codegen.validation.ValidationErrorSeverity; import software.amazon.awssdk.utils.StringUtils; import 
software.amazon.awssdk.utils.Validate; @@ -345,11 +352,20 @@ private boolean isGreedy(Shape parentShape, Map allC2jShapes, Par * @throws RuntimeException If operation can't be found. */ private String findRequestUri(Shape parentShape, Map allC2jShapes) { - return builder.getService().getOperations().values().stream() - .filter(o -> o.getInput() != null) - .filter(o -> allC2jShapes.get(o.getInput().getShape()).equals(parentShape)) - .map(o -> o.getHttp().getRequestUri()) - .findFirst().orElseThrow(() -> new RuntimeException("Could not find request URI for input shape")); + Optional operation = builder.getService().getOperations().values().stream() + .filter(o -> o.getInput() != null) + .filter(o -> allC2jShapes.get(o.getInput().getShape()).equals(parentShape)) + .findFirst(); + + return operation.map(o -> o.getHttp().getRequestUri()) + .orElseThrow(() -> { + String detailMsg = "Could not find request URI for input shape"; + ValidationEntry entry = + new ValidationEntry().withErrorId(ValidationErrorId.REQUEST_URI_NOT_FOUND) + .withDetailMessage(detailMsg) + .withSeverity(ValidationErrorSeverity.DANGER); + return ModelInvalidException.builder().validationEntries(Collections.singletonList(entry)).build(); + }); } private String deriveUnmarshallerLocationName(Shape memberShape, String memberName, Member member) { @@ -464,6 +480,6 @@ private void fillContainerTypeMemberMetadata(Map c2jShapes, } protected String getProtocol() { - return getServiceModel().getMetadata().getProtocol(); + return ProtocolUtils.resolveProtocol(getServiceModel().getMetadata()); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java index 4c097fadb4d2..0361fc6d0fdd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/CodeGenerator.java @@ -19,6 +19,9 @@ import java.io.File; import java.io.IOException; 
import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.concurrent.ForkJoinTask; import software.amazon.awssdk.codegen.emitters.GeneratorTask; import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; @@ -26,13 +29,26 @@ import software.amazon.awssdk.codegen.internal.Jackson; import software.amazon.awssdk.codegen.internal.Utils; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ModelValidationContext; +import software.amazon.awssdk.codegen.validation.ModelValidationReport; +import software.amazon.awssdk.codegen.validation.ModelValidator; +import software.amazon.awssdk.codegen.validation.SharedModelsValidator; +import software.amazon.awssdk.codegen.validation.ValidationEntry; import software.amazon.awssdk.utils.Logger; public class CodeGenerator { private static final Logger log = Logger.loggerFor(CodeGenerator.class); private static final String MODEL_DIR_NAME = "models"; - private final C2jModels models; + private static final List DEFAULT_MODEL_VALIDATORS = Collections.singletonList( + new SharedModelsValidator() + ); + + private final C2jModels c2jModels; + + private final IntermediateModel intermediateModel; + private final IntermediateModel shareModelsTarget; private final String sourcesDirectory; private final String resourcesDirectory; private final String testsDirectory; @@ -42,6 +58,9 @@ public class CodeGenerator { */ private final String fileNamePrefix; + private final List modelValidators; + private final boolean emitValidationReport; + static { // Make sure ClassName is statically initialized before we do anything in parallel. 
// Parallel static initialization of ClassName and TypeName can result in a deadlock: @@ -50,12 +69,21 @@ public class CodeGenerator { } public CodeGenerator(Builder builder) { - this.models = builder.models; + this.c2jModels = builder.models; + this.intermediateModel = builder.intermediateModel; + + if (this.c2jModels != null && this.intermediateModel != null) { + throw new IllegalArgumentException("Only one of c2jModels and intermediateModel must be specified"); + } + + this.shareModelsTarget = builder.shareModelsTarget; this.sourcesDirectory = builder.sourcesDirectory; this.testsDirectory = builder.testsDirectory; this.resourcesDirectory = builder.resourcesDirectory != null ? builder.resourcesDirectory : builder.sourcesDirectory; this.fileNamePrefix = builder.fileNamePrefix; + this.modelValidators = builder.modelValidators == null ? DEFAULT_MODEL_VALIDATORS : builder.modelValidators; + this.emitValidationReport = builder.emitValidationReport; } public static File getModelDirectory(String outputDirectory) { @@ -76,22 +104,72 @@ public static Builder builder() { * code. */ public void execute() { - try { - IntermediateModel intermediateModel = new IntermediateModelBuilder(models).build(); + ModelValidationReport report = new ModelValidationReport(); + + IntermediateModel modelToGenerate; + if (c2jModels != null) { + modelToGenerate = new IntermediateModelBuilder(c2jModels).build(); + } else { + modelToGenerate = intermediateModel; + } + List validatorEntries = runModelValidators(modelToGenerate); + report.setValidationEntries(validatorEntries); + + if (emitValidationReport) { + writeValidationReport(report); + } + + if (!validatorEntries.isEmpty()) { + throw new RuntimeException("Validation failed. 
See validation report for details."); + } + + try { if (fileNamePrefix != null) { - writeIntermediateModel(intermediateModel); + writeIntermediateModel(modelToGenerate); } - emitCode(intermediateModel); + emitCode(modelToGenerate); } catch (Exception e) { log.error(() -> "Failed to generate code. ", e); + + if (e instanceof ModelInvalidException && emitValidationReport) { + ModelInvalidException invalidException = (ModelInvalidException) e; + report.setValidationEntries(invalidException.validationEntries()); + writeValidationReport(report); + } + throw new RuntimeException( "Failed to generate code. Exception message : " + e.getMessage(), e); } } + private List runModelValidators(IntermediateModel intermediateModel) { + ModelValidationContext ctx = ModelValidationContext.builder() + .intermediateModel(intermediateModel) + .shareModelsTarget(shareModelsTarget) + .build(); + + List validationEntries = new ArrayList<>(); + + modelValidators.forEach(v -> validationEntries.addAll(v.validateModels(ctx))); + + return validationEntries; + } + + private void writeValidationReport(ModelValidationReport report) { + try { + writeModel(report, "validation-report.json"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + private void writeIntermediateModel(IntermediateModel model) throws IOException { + writeModel(model, fileNamePrefix + "-intermediate.json"); + } + + private void writeModel(Object model, String name) throws IOException { File modelDir = getModelDirectory(sourcesDirectory); PrintWriter writer = null; try { @@ -100,7 +178,7 @@ private void writeIntermediateModel(IntermediateModel model) throws IOException throw new RuntimeException("Failed to create " + outDir.getAbsolutePath()); } - File outputFile = new File(modelDir, fileNamePrefix + "-intermediate.json"); + File outputFile = new File(modelDir, name); if (!outputFile.exists() && !outputFile.createNewFile()) { throw new RuntimeException("Error creating file " + outputFile.getAbsolutePath()); 
@@ -134,10 +212,14 @@ private GeneratorTask createGeneratorTasks(IntermediateModel intermediateModel) public static final class Builder { private C2jModels models; + private IntermediateModel intermediateModel; + private IntermediateModel shareModelsTarget; private String sourcesDirectory; private String resourcesDirectory; private String testsDirectory; private String fileNamePrefix; + private List modelValidators; + private boolean emitValidationReport; private Builder() { } @@ -147,6 +229,16 @@ public Builder models(C2jModels models) { return this; } + public Builder intermediateModel(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; + return this; + } + + public Builder shareModelsTarget(IntermediateModel shareModelsTarget) { + this.shareModelsTarget = shareModelsTarget; + return this; + } + public Builder sourcesDirectory(String sourcesDirectory) { this.sourcesDirectory = sourcesDirectory; return this; @@ -167,6 +259,16 @@ public Builder intermediateModelFileNamePrefix(String fileNamePrefix) { return this; } + public Builder modelValidators(List modelValidators) { + this.modelValidators = modelValidators; + return this; + } + + public Builder emitValidationReport(boolean emitValidationReport) { + this.emitValidationReport = emitValidationReport; + return this; + } + /** * @return An immutable {@link CodeGenerator} object. 
*/ diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java index 273c231b136d..74ab28dd3417 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/ExplicitStringPayloadQueryProtocolProcessor.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.codegen.model.service.Output; import software.amazon.awssdk.codegen.model.service.ServiceModel; import software.amazon.awssdk.codegen.model.service.Shape; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * Operations with explicit String payloads are not supported for services with Query protocol. We fail the codegen if the @@ -31,7 +32,7 @@ public class ExplicitStringPayloadQueryProtocolProcessor implements CodegenCustomizationProcessor { @Override public void preprocess(ServiceModel serviceModel) { - String protocol = serviceModel.getMetadata().getProtocol(); + String protocol = ProtocolUtils.resolveProtocol(serviceModel.getMetadata()); if (!"ec2".equals(protocol) && !"query".equals(protocol)) { return; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java index 1c0786425507..6e4441203516 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/MetadataModifiersProcessor.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.codegen.customization.processors; +import java.util.Collections; import 
software.amazon.awssdk.codegen.customization.CodegenCustomizationProcessor; import software.amazon.awssdk.codegen.model.config.customization.MetadataConfig; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; @@ -44,7 +45,7 @@ public void preprocess(ServiceModel serviceModel) { String customProtocol = metadataConfig.getProtocol(); if (customProtocol != null) { - serviceMetadata.setProtocol(customProtocol); + serviceMetadata.setProtocols(Collections.singletonList(customProtocol)); } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java index 7db556fa63ed..2bfb74bef54c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/customization/processors/SmithyRpcV2CborProtocolProcessor.java @@ -20,6 +20,7 @@ import software.amazon.awssdk.codegen.model.service.Http; import software.amazon.awssdk.codegen.model.service.Operation; import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; /** * This processor only runs for services using the smithy-rpc-v2-cbor protocol. 
@@ -29,7 +30,8 @@ public class SmithyRpcV2CborProtocolProcessor implements CodegenCustomizationProcessor { @Override public void preprocess(ServiceModel serviceModel) { - if (!"smithy-rpc-v2-cbor".equals(serviceModel.getMetadata().getProtocol())) { + String protocol = ProtocolUtils.resolveProtocol(serviceModel.getMetadata()); + if (!"smithy-rpc-v2-cbor".equals(protocol)) { return; } serviceModel.getOperations().forEach((name, op) -> setRequestUri(serviceModel, name, op)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java index fbcec7931bd8..38c170898f27 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/AuthSchemeGeneratorTasks.java @@ -28,6 +28,7 @@ import software.amazon.awssdk.codegen.poet.auth.scheme.EndpointAwareAuthSchemeParamsSpec; import software.amazon.awssdk.codegen.poet.auth.scheme.EndpointBasedAuthSchemeProviderSpec; import software.amazon.awssdk.codegen.poet.auth.scheme.ModelBasedAuthSchemeProviderSpec; +import software.amazon.awssdk.codegen.poet.auth.scheme.PreferredAuthSchemeProviderSpec; public final class AuthSchemeGeneratorTasks extends BaseGeneratorTasks { private final GeneratorTaskParams generatorTaskParams; @@ -45,6 +46,7 @@ protected List createTasks() { tasks.add(generateProviderInterface()); tasks.add(generateDefaultParamsImpl()); tasks.add(generateModelBasedProvider()); + tasks.add(generatePreferenceProvider()); tasks.add(generateAuthSchemeInterceptor()); if (authSchemeSpecUtils.useEndpointBasedAuthProvider()) { tasks.add(generateEndpointBasedProvider()); @@ -69,6 +71,10 @@ private GeneratorTask generateModelBasedProvider() { return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new ModelBasedAuthSchemeProviderSpec(model)); } + 
private GeneratorTask generatePreferenceProvider() { + return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new PreferredAuthSchemeProviderSpec(model)); + } + private GeneratorTask generateEndpointBasedProvider() { return new PoetGeneratorTask(authSchemeInternalDir(), model.getFileHeader(), new EndpointBasedAuthSchemeProviderSpec(model)); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java index 731f70e0cba3..cdabdbf219cd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/BaseGeneratorTasks.java @@ -71,6 +71,8 @@ protected void compute() { ForkJoinTask.invokeAll(createTasks()); log.info(" Completed " + taskName + "."); } + } catch (RuntimeException e) { + throw e; } catch (Exception e) { throw new RuntimeException(e); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java index 7d407f582f7d..c02571f181c0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/emitters/tasks/CommonInternalGeneratorTasks.java @@ -15,12 +15,14 @@ package software.amazon.awssdk.codegen.emitters.tasks; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; import software.amazon.awssdk.codegen.emitters.GeneratorTask; import software.amazon.awssdk.codegen.emitters.GeneratorTaskParams; import software.amazon.awssdk.codegen.emitters.PoetGeneratorTask; +import software.amazon.awssdk.codegen.poet.client.EnvironmentTokenSystemSettingsClass; import 
software.amazon.awssdk.codegen.poet.client.SdkClientOptions; +import software.amazon.awssdk.codegen.poet.client.specs.ServiceVersionInfoSpec; import software.amazon.awssdk.codegen.poet.common.UserAgentUtilsSpec; public class CommonInternalGeneratorTasks extends BaseGeneratorTasks { @@ -33,7 +35,14 @@ public CommonInternalGeneratorTasks(GeneratorTaskParams params) { @Override protected List createTasks() throws Exception { - return Arrays.asList(createClientOptionTask(), createUserAgentTask()); + List tasks = new ArrayList<>(); + tasks.add(createClientOptionTask()); + tasks.add(createUserAgentTask()); + if (params.getModel().getCustomizationConfig().isEnableEnvironmentBearerToken()) { + tasks.add(createEnvironmentTokenSystemSettingTask()); + } + tasks.add(createServiceVersionInfoTask()); + return tasks; } private PoetGeneratorTask createClientOptionTask() { @@ -46,6 +55,16 @@ private PoetGeneratorTask createUserAgentTask() { new UserAgentUtilsSpec(params.getModel())); } + private GeneratorTask createEnvironmentTokenSystemSettingTask() { + return new PoetGeneratorTask(clientOptionsDir(), params.getModel().getFileHeader(), + new EnvironmentTokenSystemSettingsClass(params.getModel())); + } + + private GeneratorTask createServiceVersionInfoTask() { + return new PoetGeneratorTask(clientOptionsDir(), params.getModel().getFileHeader(), + new ServiceVersionInfoSpec(params.getModel())); + } + private String clientOptionsDir() { return params.getPathProvider().getClientInternalDirectory(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java index 1efeefb9b27a..becbdfd18a32 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/internal/Utils.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.codegen.model.service.ServiceModel; import 
software.amazon.awssdk.codegen.model.service.Shape; import software.amazon.awssdk.codegen.model.service.XmlNamespace; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.StringUtils; @@ -331,11 +332,13 @@ public static ShapeMarshaller createInputShapeMarshaller(ServiceMetadata service "The operation parameter must be specified!"); } + String protocol = ProtocolUtils.resolveProtocol(service); + ShapeMarshaller marshaller = new ShapeMarshaller() .withAction(operation.getName()) .withVerb(operation.getHttp().getMethod()) .withRequestUri(operation.getHttp().getRequestUri()) - .withProtocol(service.getProtocol()); + .withProtocol(protocol); Input input = operation.getInput(); if (input != null) { marshaller.setLocationName(input.getLocationName()); @@ -345,7 +348,7 @@ public static ShapeMarshaller createInputShapeMarshaller(ServiceMetadata service marshaller.setXmlNameSpaceUri(xmlNamespace.getUri()); } } - if (Metadata.usesOperationIdentifier(service.getProtocol())) { + if (Metadata.usesOperationIdentifier(protocol)) { marshaller.setTarget(StringUtils.isEmpty(service.getTargetPrefix()) ? operation.getName() : service.getTargetPrefix() + "." 
+ operation.getName()); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java index 32cecd79feb5..cfa719d4c738 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/config/customization/CustomizationConfig.java @@ -350,6 +350,18 @@ public class CustomizationConfig { */ private boolean enableFastUnmarshaller; + /** + * A boolean flag to indicate if support for configuring a bearer token sourced from the environment should be added to the + * generated service. When enabled, the generated client will use bearer auth with the token sourced from the + * `AWS_BEARER_TOKEN_[SigningName]` environment variable. + */ + private boolean enableEnvironmentBearerToken = false; + + /** + * A boolean flag to indicate if the code-generated endpoint providers class should cache the calls to URI constructors. 
+ */ + private boolean enableEndpointProviderUriCaching; + private CustomizationConfig() { } @@ -924,4 +936,20 @@ public boolean getEnableFastUnmarshaller() { public void setEnableFastUnmarshaller(boolean enableFastUnmarshaller) { this.enableFastUnmarshaller = enableFastUnmarshaller; } + + public boolean isEnableEnvironmentBearerToken() { + return enableEnvironmentBearerToken; + } + + public void setEnableEnvironmentBearerToken(boolean enableEnvironmentBearerToken) { + this.enableEnvironmentBearerToken = enableEnvironmentBearerToken; + } + + public boolean getEnableEndpointProviderUriCaching() { + return enableEndpointProviderUriCaching; + } + + public void setEnableEndpointProviderUriCaching(boolean enableEndpointProviderUriCaching) { + this.enableEndpointProviderUriCaching = enableEndpointProviderUriCaching; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java index 5013db7d3f9e..16e848303a4f 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModel.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; + public class ArgumentModel extends DocumentationModel { private String name; @@ -61,4 +63,28 @@ public ArgumentModel withIsEnumArg(boolean isEnumArg) { this.isEnumArg = isEnumArg; return this; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + ArgumentModel that = (ArgumentModel) o; + return isEnumArg == that.isEnumArg + && Objects.equals(name, that.name) + && Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + 
Objects.hashCode(name); + result = 31 * result + Objects.hashCode(type); + result = 31 * result + Boolean.hashCode(isEnumArg); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java index ce98c0dfea8e..316f4e741139 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModel.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.model.intermediate; import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; import software.amazon.awssdk.codegen.model.service.Location; public class AuthorizerModel extends DocumentationModel { @@ -63,4 +64,30 @@ public String getAddAuthTokenMethod() { authTokenLocation)); } } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + AuthorizerModel that = (AuthorizerModel) o; + return Objects.equals(name, that.name) + && Objects.equals(interfaceName, that.interfaceName) + && authTokenLocation == that.authTokenLocation + && Objects.equals(tokenName, that.tokenName); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(name); + result = 31 * result + Objects.hashCode(interfaceName); + result = 31 * result + Objects.hashCode(authTokenLocation); + result = 31 * result + Objects.hashCode(tokenName); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java index 5be891040acc..55fd39f4a7c7 100644 --- 
a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/DocumentationModel.java @@ -17,6 +17,8 @@ import static software.amazon.awssdk.codegen.internal.DocumentationUtils.escapeIllegalCharacters; +import java.util.Objects; + public class DocumentationModel { protected String documentation; @@ -28,4 +30,22 @@ public String getDocumentation() { public void setDocumentation(String documentation) { this.documentation = escapeIllegalCharacters(documentation); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DocumentationModel that = (DocumentationModel) o; + return Objects.equals(documentation, that.documentation); + } + + @Override + public int hashCode() { + return Objects.hashCode(documentation); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java index 91a5f3b60f25..e372079fc541 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EndpointDiscovery.java @@ -26,4 +26,22 @@ public boolean isRequired() { public void setRequired(boolean required) { this.required = required; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EndpointDiscovery that = (EndpointDiscovery) o; + return required == that.required; + } + + @Override + public int hashCode() { + return Boolean.hashCode(required); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java index f469b5de99fd..652f2c2aca6e 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/EnumModel.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; + /** * Represents a single enum field in a enum. */ @@ -49,4 +51,23 @@ public String getValue() { return value; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + EnumModel enumModel = (EnumModel) o; + return Objects.equals(value, enumModel.value) && Objects.equals(name, enumModel.name); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(value); + result = 31 * result + Objects.hashCode(name); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java index fddf93d4d72d..3e905aa1ed56 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/MemberModel.java @@ -28,6 +28,7 @@ import com.squareup.javapoet.ClassName; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import software.amazon.awssdk.codegen.internal.TypeUtils; import software.amazon.awssdk.codegen.model.service.ContextParam; @@ -785,4 +786,98 @@ public void ignoreDataTypeConversionFailures(boolean ignoreDataTypeConversionFai public boolean ignoreDataTypeConversionFailures() { return ignoreDataTypeConversionFailures; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if 
(!super.equals(o)) { + return false; + } + + MemberModel that = (MemberModel) o; + return deprecated == that.deprecated + && required == that.required + && synthetic == that.synthetic + && idempotencyToken == that.idempotencyToken + && isJsonValue == that.isJsonValue + && eventPayload == that.eventPayload + && eventHeader == that.eventHeader + && endpointDiscoveryId == that.endpointDiscoveryId + && sensitive == that.sensitive + && xmlAttribute == that.xmlAttribute + && ignoreDataTypeConversionFailures == that.ignoreDataTypeConversionFailures + && Objects.equals(name, that.name) + && Objects.equals(c2jName, that.c2jName) + && Objects.equals(c2jShape, that.c2jShape) + && Objects.equals(variable, that.variable) + && Objects.equals(setterModel, that.setterModel) + && Objects.equals(getterModel, that.getterModel) + && Objects.equals(http, that.http) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) + && Objects.equals(listModel, that.listModel) + && Objects.equals(mapModel, that.mapModel) + && Objects.equals(enumType, that.enumType) + && Objects.equals(xmlNameSpaceUri, that.xmlNameSpaceUri) + && Objects.equals(shape, that.shape) + && Objects.equals(fluentGetterMethodName, that.fluentGetterMethodName) + && Objects.equals(fluentEnumGetterMethodName, that.fluentEnumGetterMethodName) + && Objects.equals(fluentSetterMethodName, that.fluentSetterMethodName) + && Objects.equals(fluentEnumSetterMethodName, that.fluentEnumSetterMethodName) + && Objects.equals(existenceCheckMethodName, that.existenceCheckMethodName) + && Objects.equals(beanStyleGetterName, that.beanStyleGetterName) + && Objects.equals(beanStyleSetterName, that.beanStyleSetterName) + && Objects.equals(unionEnumTypeName, that.unionEnumTypeName) + && Objects.equals(timestampFormat, that.timestampFormat) + && Objects.equals(deprecatedName, that.deprecatedName) + && Objects.equals(fluentDeprecatedGetterMethodName, that.fluentDeprecatedGetterMethodName) + && Objects.equals(fluentDeprecatedSetterMethodName, 
that.fluentDeprecatedSetterMethodName) + && Objects.equals(deprecatedBeanStyleSetterMethodName, that.deprecatedBeanStyleSetterMethodName) + && Objects.equals(contextParam, that.contextParam); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(name); + result = 31 * result + Objects.hashCode(c2jName); + result = 31 * result + Objects.hashCode(c2jShape); + result = 31 * result + Objects.hashCode(variable); + result = 31 * result + Objects.hashCode(setterModel); + result = 31 * result + Objects.hashCode(getterModel); + result = 31 * result + Objects.hashCode(http); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Boolean.hashCode(required); + result = 31 * result + Boolean.hashCode(synthetic); + result = 31 * result + Objects.hashCode(listModel); + result = 31 * result + Objects.hashCode(mapModel); + result = 31 * result + Objects.hashCode(enumType); + result = 31 * result + Objects.hashCode(xmlNameSpaceUri); + result = 31 * result + Boolean.hashCode(idempotencyToken); + result = 31 * result + Objects.hashCode(shape); + result = 31 * result + Objects.hashCode(fluentGetterMethodName); + result = 31 * result + Objects.hashCode(fluentEnumGetterMethodName); + result = 31 * result + Objects.hashCode(fluentSetterMethodName); + result = 31 * result + Objects.hashCode(fluentEnumSetterMethodName); + result = 31 * result + Objects.hashCode(existenceCheckMethodName); + result = 31 * result + Objects.hashCode(beanStyleGetterName); + result = 31 * result + Objects.hashCode(beanStyleSetterName); + result = 31 * result + Objects.hashCode(unionEnumTypeName); + result = 31 * result + Boolean.hashCode(isJsonValue); + result = 31 * result + Objects.hashCode(timestampFormat); + result = 31 * result + Boolean.hashCode(eventPayload); + result = 31 * result + Boolean.hashCode(eventHeader); + result = 31 * result + 
Boolean.hashCode(endpointDiscoveryId); + result = 31 * result + Boolean.hashCode(sensitive); + result = 31 * result + Boolean.hashCode(xmlAttribute); + result = 31 * result + Objects.hashCode(deprecatedName); + result = 31 * result + Objects.hashCode(fluentDeprecatedGetterMethodName); + result = 31 * result + Objects.hashCode(fluentDeprecatedSetterMethodName); + result = 31 * result + Objects.hashCode(deprecatedBeanStyleSetterMethodName); + result = 31 * result + Objects.hashCode(contextParam); + result = 31 * result + Boolean.hashCode(ignoreDataTypeConversionFailures); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java index a2a060c7a915..6b192644da1d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/OperationModel.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import software.amazon.awssdk.codegen.checksum.HttpChecksum; import software.amazon.awssdk.codegen.compression.RequestCompression; import software.amazon.awssdk.codegen.docs.ClientType; @@ -379,4 +380,63 @@ public boolean isUnsignedPayload() { public void setUnsignedPayload(boolean unsignedPayload) { this.unsignedPayload = unsignedPayload; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + OperationModel that = (OperationModel) o; + return deprecated == that.deprecated && hasBlobMemberAsPayload == that.hasBlobMemberAsPayload + && hasStringMemberAsPayload == that.hasStringMemberAsPayload && isAuthenticated == that.isAuthenticated + && isPaginated == that.isPaginated && endpointOperation == that.endpointOperation + && 
endpointCacheRequired == that.endpointCacheRequired && httpChecksumRequired == that.httpChecksumRequired + && unsignedPayload == that.unsignedPayload && Objects.equals(operationName, that.operationName) + && Objects.equals(serviceProtocol, that.serviceProtocol) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) && Objects.equals(input, that.input) + && Objects.equals(returnType, that.returnType) && Objects.equals(exceptions, that.exceptions) + && Objects.equals(simpleMethods, that.simpleMethods) && authType == that.authType + && Objects.equals(auth, that.auth) && Objects.equals(endpointDiscovery, that.endpointDiscovery) + && Objects.equals(inputShape, that.inputShape) && Objects.equals(outputShape, that.outputShape) + && Objects.equals(endpointTrait, that.endpointTrait) && Objects.equals(httpChecksum, that.httpChecksum) + && Objects.equals(requestcompression, that.requestcompression) + && Objects.equals(staticContextParams, that.staticContextParams) + && Objects.equals(operationContextParams, that.operationContextParams); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(operationName); + result = 31 * result + Objects.hashCode(serviceProtocol); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Objects.hashCode(input); + result = 31 * result + Objects.hashCode(returnType); + result = 31 * result + Objects.hashCode(exceptions); + result = 31 * result + Objects.hashCode(simpleMethods); + result = 31 * result + Boolean.hashCode(hasBlobMemberAsPayload); + result = 31 * result + Boolean.hashCode(hasStringMemberAsPayload); + result = 31 * result + Boolean.hashCode(isAuthenticated); + result = 31 * result + Objects.hashCode(authType); + result = 31 * result + Objects.hashCode(auth); + result = 31 * result + Boolean.hashCode(isPaginated); + result = 31 * result + Boolean.hashCode(endpointOperation); + 
result = 31 * result + Boolean.hashCode(endpointCacheRequired); + result = 31 * result + Objects.hashCode(endpointDiscovery); + result = 31 * result + Objects.hashCode(inputShape); + result = 31 * result + Objects.hashCode(outputShape); + result = 31 * result + Objects.hashCode(endpointTrait); + result = 31 * result + Boolean.hashCode(httpChecksumRequired); + result = 31 * result + Objects.hashCode(httpChecksum); + result = 31 * result + Objects.hashCode(requestcompression); + result = 31 * result + Objects.hashCode(staticContextParams); + result = 31 * result + Objects.hashCode(operationContextParams); + result = 31 * result + Boolean.hashCode(unsignedPayload); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java index 22ed4a8e6880..fc9a776059a7 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMapping.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; import software.amazon.awssdk.codegen.model.service.Location; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -199,4 +200,40 @@ public MarshallLocation getMarshallLocation() { } } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ParameterHttpMapping that = (ParameterHttpMapping) o; + return isPayload == that.isPayload + && isStreaming == that.isStreaming + && flattened == that.flattened + && isGreedy == that.isGreedy + && requiresLength == that.requiresLength + && Objects.equals(unmarshallLocationName, that.unmarshallLocationName) + && Objects.equals(marshallLocationName, that.marshallLocationName) + && 
Objects.equals(additionalUnmarshallingPath, that.additionalUnmarshallingPath) + && Objects.equals(additionalMarshallingPath, that.additionalMarshallingPath) + && location == that.location; + } + + @Override + public int hashCode() { + int result = Objects.hashCode(unmarshallLocationName); + result = 31 * result + Objects.hashCode(marshallLocationName); + result = 31 * result + Objects.hashCode(additionalUnmarshallingPath); + result = 31 * result + Objects.hashCode(additionalMarshallingPath); + result = 31 * result + Boolean.hashCode(isPayload); + result = 31 * result + Boolean.hashCode(isStreaming); + result = 31 * result + Objects.hashCode(location); + result = 31 * result + Boolean.hashCode(flattened); + result = 31 * result + Boolean.hashCode(isGreedy); + result = 31 * result + Boolean.hashCode(requiresLength); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java index 77dff4c71481..1d46c2802cda 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModel.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate; +import java.util.Objects; + public class ReturnTypeModel { private String returnType; @@ -48,4 +50,24 @@ public ReturnTypeModel withDocumentation(String documentation) { setDocumentation(documentation); return this; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReturnTypeModel that = (ReturnTypeModel) o; + return Objects.equals(returnType, that.returnType) && Objects.equals(documentation, that.documentation); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(returnType); + result = 31 
* result + Objects.hashCode(documentation); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java index 098ea46bc7e4..3c26965302d5 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModel.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; import software.amazon.awssdk.codegen.model.intermediate.customization.ShapeCustomizationInfo; import software.amazon.awssdk.codegen.model.service.XmlNamespace; @@ -669,4 +670,84 @@ public ShapeModel withIsThrottling(boolean throttling) { this.throttling = throttling; return this; } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + ShapeModel that = (ShapeModel) o; + return deprecated == that.deprecated + && hasPayloadMember == that.hasPayloadMember + && hasHeaderMember == that.hasHeaderMember + && hasStatusCodeMember == that.hasStatusCodeMember + && hasStreamingMember == that.hasStreamingMember + && hasRequiresLengthMember == that.hasRequiresLengthMember + && wrapper == that.wrapper + && simpleMethod == that.simpleMethod + && fault == that.fault + && isEventStream == that.isEventStream + && isEvent == that.isEvent + && document == that.document + && union == that.union + && retryable == that.retryable + && throttling == that.throttling + && Objects.equals(c2jName, that.c2jName) + && Objects.equals(shapeName, that.shapeName) + && Objects.equals(deprecatedMessage, that.deprecatedMessage) + && Objects.equals(type, that.type) + && Objects.equals(required, that.required) + && Objects.equals(requestSignerClassFqcn, 
that.requestSignerClassFqcn) + && Objects.equals(endpointDiscovery, that.endpointDiscovery) + && Objects.equals(members, that.members) + && Objects.equals(enums, that.enums) + && Objects.equals(variable, that.variable) + && Objects.equals(marshaller, that.marshaller) + && Objects.equals(unmarshaller, that.unmarshaller) + && Objects.equals(errorCode, that.errorCode) + && Objects.equals(httpStatusCode, that.httpStatusCode) + && Objects.equals(customization, that.customization) + && Objects.equals(xmlNamespace, that.xmlNamespace); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(c2jName); + result = 31 * result + Objects.hashCode(shapeName); + result = 31 * result + Boolean.hashCode(deprecated); + result = 31 * result + Objects.hashCode(deprecatedMessage); + result = 31 * result + Objects.hashCode(type); + result = 31 * result + Objects.hashCode(required); + result = 31 * result + Boolean.hashCode(hasPayloadMember); + result = 31 * result + Boolean.hashCode(hasHeaderMember); + result = 31 * result + Boolean.hashCode(hasStatusCodeMember); + result = 31 * result + Boolean.hashCode(hasStreamingMember); + result = 31 * result + Boolean.hashCode(hasRequiresLengthMember); + result = 31 * result + Boolean.hashCode(wrapper); + result = 31 * result + Boolean.hashCode(simpleMethod); + result = 31 * result + Objects.hashCode(requestSignerClassFqcn); + result = 31 * result + Objects.hashCode(endpointDiscovery); + result = 31 * result + Objects.hashCode(members); + result = 31 * result + Objects.hashCode(enums); + result = 31 * result + Objects.hashCode(variable); + result = 31 * result + Objects.hashCode(marshaller); + result = 31 * result + Objects.hashCode(unmarshaller); + result = 31 * result + Objects.hashCode(errorCode); + result = 31 * result + Objects.hashCode(httpStatusCode); + result = 31 * result + Boolean.hashCode(fault); + result = 31 * result + Objects.hashCode(customization); + result = 31 * 
result + Boolean.hashCode(isEventStream); + result = 31 * result + Boolean.hashCode(isEvent); + result = 31 * result + Objects.hashCode(xmlNamespace); + result = 31 * result + Boolean.hashCode(document); + result = 31 * result + Boolean.hashCode(union); + result = 31 * result + Boolean.hashCode(retryable); + result = 31 * result + Boolean.hashCode(throttling); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java index bdf0668a9d21..b9355009e748 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/VariableModel.java @@ -17,6 +17,7 @@ import java.util.Collection; import java.util.List; +import java.util.Objects; public class VariableModel extends DocumentationModel { @@ -98,4 +99,31 @@ public String getVariableSetterType() { public String toString() { return variableName; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + VariableModel that = (VariableModel) o; + return Objects.equals(variableName, that.variableName) + && Objects.equals(variableType, that.variableType) + && Objects.equals(variableDeclarationType, that.variableDeclarationType); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(variableName); + result = 31 * result + Objects.hashCode(variableType); + result = 31 * result + Objects.hashCode(variableDeclarationType); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java index e8adab25b48c..dd0b91d86301 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapper.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.intermediate.customization; +import java.util.Objects; + public class ArtificialResultWrapper { private String wrappedMemberName; private String wrappedMemberSimpleType; @@ -34,4 +36,25 @@ public String getWrappedMemberSimpleType() { public void setWrappedMemberSimpleType(String wrappedMemberSimpleType) { this.wrappedMemberSimpleType = wrappedMemberSimpleType; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ArtificialResultWrapper that = (ArtificialResultWrapper) o; + return Objects.equals(wrappedMemberName, that.wrappedMemberName) + && Objects.equals(wrappedMemberSimpleType, that.wrappedMemberSimpleType); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(wrappedMemberName); + result = 31 * result + Objects.hashCode(wrappedMemberSimpleType); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java index b6d3950985b2..2e031eabb9a4 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfo.java @@ -16,6 +16,7 @@ package software.amazon.awssdk.codegen.model.intermediate.customization; import 
com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; public class ShapeCustomizationInfo { @@ -72,4 +73,33 @@ public void setStaxTargetDepthOffset(int staxTargetDepthOffset) { public boolean hasStaxTargetDepthOffset() { return hasStaxTargetDepthOffset; } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ShapeCustomizationInfo that = (ShapeCustomizationInfo) o; + return skipGeneratingModelClass == that.skipGeneratingModelClass + && skipGeneratingMarshaller == that.skipGeneratingMarshaller + && skipGeneratingUnmarshaller == that.skipGeneratingUnmarshaller + && staxTargetDepthOffset == that.staxTargetDepthOffset + && hasStaxTargetDepthOffset == that.hasStaxTargetDepthOffset + && Objects.equals(artificialResultWrapper, that.artificialResultWrapper); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(artificialResultWrapper); + result = 31 * result + Boolean.hashCode(skipGeneratingModelClass); + result = 31 * result + Boolean.hashCode(skipGeneratingMarshaller); + result = 31 * result + Boolean.hashCode(skipGeneratingUnmarshaller); + result = 31 * result + staxTargetDepthOffset; + result = 31 * result + Boolean.hashCode(hasStaxTargetDepthOffset); + return result; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java index 96f363cd84f1..8650d1145bcb 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ContextParam.java @@ -15,6 +15,8 @@ package software.amazon.awssdk.codegen.model.service; +import java.util.Objects; + public class ContextParam { private String name; @@ -25,4 +27,22 @@ public String getName() { public void setName(String name) { this.name = name; } + + 
@Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ContextParam that = (ContextParam) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hashCode(name); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java index 073f97e05f8f..95f7f7349d95 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/model/service/ServiceMetadata.java @@ -17,6 +17,7 @@ import java.util.List; import java.util.Map; +import software.amazon.awssdk.codegen.utils.ProtocolUtils; public class ServiceMetadata { @@ -36,6 +37,8 @@ public class ServiceMetadata { private String protocol; + private List protocols; + private String jsonVersion; private Map awsQueryCompatible; @@ -103,14 +106,30 @@ public void setXmlNamespace(String xmlNamespace) { this.xmlNamespace = xmlNamespace; } + /** + * {@code protocol} superseded by {@code protocols} field, resolved in {@link ProtocolUtils#resolveProtocol(ServiceMetadata)} + */ + @Deprecated public String getProtocol() { return protocol; } + /** + * {@code protocol} superseded by {@code protocols} field, resolved in {@link ProtocolUtils#resolveProtocol(ServiceMetadata)} + */ + @Deprecated public void setProtocol(String protocol) { this.protocol = protocol; } + public List getProtocols() { + return protocols; + } + + public void setProtocols(List protocols) { + this.protocols = protocols; + } + public String getJsonVersion() { return jsonVersion; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java index 
e5a4904f295f..efb230313efb 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategy.java @@ -432,6 +432,22 @@ private boolean isDisallowedNameForShape(String name, Shape parentShape) { } } + @Override + public String getSigningName() { + return Optional.ofNullable(serviceModel.getMetadata().getSigningName()) + .orElseGet(() -> serviceModel.getMetadata().getEndpointPrefix()); + } + + @Override + public String getSigningNameForEnvironmentVariables() { + return screamCase(getSigningName()); + } + + @Override + public String getSigningNameForSystemProperties() { + return pascalCase(getSigningName()); + } + @Override public void validateCustomerVisibleNaming(IntermediateModel trimmedModel) { Metadata metadata = trimmedModel.getMetadata(); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java index 1fe32773d71f..637920be14de 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/naming/NamingStrategy.java @@ -200,6 +200,21 @@ public interface NamingStrategy { */ String getExistenceCheckMethodName(String memberName, Shape parentShape); + /** + * Retrieve the service's signing name that should be used based on the model. + */ + String getSigningName(); + + /** + * Retrieve the service's signing name that should be used for environment variables. + */ + String getSigningNameForEnvironmentVariables(); + + /** + * Retrieve the service's signing name that should be used for system properties. + */ + String getSigningNameForSystemProperties(); + /** * Verify the customer-visible naming in the provided intermediate model will compile and is idiomatic to Java. 
*/ diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java index b126fd2f201e..f119507809c2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/PoetExtension.java @@ -79,6 +79,14 @@ public ClassName getUserAgentClass() { return ClassName.get(model.getMetadata().getFullClientInternalPackageName(), "UserAgentUtils"); } + public ClassName getServiceVersionInfoClass() { + return ClassName.get(model.getMetadata().getFullClientInternalPackageName(), "ServiceVersionInfo"); + } + + public ClassName getEnvironmentTokenSystemSettingsClass() { + return ClassName.get(model.getMetadata().getFullClientInternalPackageName(), "EnvironmentTokenSystemSettings"); + } + /** * @param operationName Name of the operation * @return A Poet {@link ClassName} for the response type of a paginated operation in the base service package. 
diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java index 7686ef132271..043bf74ba9d9 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeInterceptorSpec.java @@ -49,8 +49,10 @@ import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.internal.util.MetricUtils; import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; import software.amazon.awssdk.endpoints.EndpointProvider; import software.amazon.awssdk.http.auth.aws.signer.RegionSet; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; @@ -70,8 +72,10 @@ public final class AuthSchemeInterceptorSpec implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; private final EndpointRulesSpecUtils endpointRulesSpecUtils; + private final IntermediateModel intermediateModel; public AuthSchemeInterceptorSpec(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); this.endpointRulesSpecUtils = new EndpointRulesSpecUtils(intermediateModel); } @@ -99,9 +103,42 @@ public TypeSpec poetSpec() { .addMethod(generateTrySelectAuthScheme()) .addMethod(generateGetIdentityMetric()) .addMethod(putSelectedAuthSchemeMethodSpec()); + if (intermediateModel.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + builder.addMethod(generateEnvironmentTokenMetric()); + } return 
builder.build(); } + private MethodSpec generateEnvironmentTokenMetric() { + return MethodSpec + .methodBuilder("recordEnvironmentTokenBusinessMetric") + .addModifiers(Modifier.PRIVATE) + .addTypeVariable(TypeVariableName.get("T", Identity.class)) + .addParameter(ParameterSpec.builder( + ParameterizedTypeName.get(ClassName.get(SelectedAuthScheme.class), + TypeVariableName.get("T")), + "selectedAuthScheme").build()) + .addParameter(ExecutionAttributes.class, "executionAttributes") + .addStatement("$T tokenFromEnv = executionAttributes.getAttribute($T.TOKEN_CONFIGURED_FROM_ENV)", + String.class, SdkInternalExecutionAttribute.class) + .beginControlFlow("if (selectedAuthScheme != null && selectedAuthScheme.authSchemeOption().schemeId().equals($T" + + ".SCHEME_ID) && selectedAuthScheme.identity().isDone())", BearerAuthScheme.class) + .beginControlFlow("if (selectedAuthScheme.identity().getNow(null) instanceof $T)", TokenIdentity.class) + + .addStatement("$T configuredToken = ($T) selectedAuthScheme.identity().getNow(null)", + TokenIdentity.class, TokenIdentity.class) + .beginControlFlow("if (configuredToken.token().equals(tokenFromEnv))") + .addStatement("executionAttributes.getAttribute($T.BUSINESS_METRICS)" + + ".addMetric($T.BEARER_SERVICE_ENV_VARS.value())", + SdkInternalExecutionAttribute.class, BusinessMetricFeatureId.class) + .endControlFlow() + .endControlFlow() + .endControlFlow() + .build(); + + + } + private MethodSpec generateBeforeExecution() { MethodSpec.Builder builder = MethodSpec.methodBuilder("beforeExecution") .addAnnotation(Override.class) @@ -116,6 +153,11 @@ private MethodSpec generateBeforeExecution() { .addStatement("$T selectedAuthScheme = selectAuthScheme(authOptions, executionAttributes)", wildcardSelectedAuthScheme()) .addStatement("putSelectedAuthScheme(executionAttributes, selectedAuthScheme)"); + + if (intermediateModel.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + 
builder.addStatement("recordEnvironmentTokenBusinessMetric(selectedAuthScheme, " + + "executionAttributes)"); + } return builder.build(); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java index bc5255695ad1..b43c577e9931 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeProviderSpec.java @@ -21,6 +21,7 @@ import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; +import java.util.List; import java.util.function.Consumer; import javax.lang.model.element.Modifier; import software.amazon.awssdk.annotations.SdkPublicApi; @@ -54,6 +55,7 @@ public TypeSpec poetSpec() { .addMethod(resolveAuthSchemeMethod()) .addMethod(resolveAuthSchemeConsumerBuilderMethod()) .addMethod(defaultProviderMethod()) + .addMethod(defaultPreferredProviderMethod()) .build(); } @@ -93,6 +95,17 @@ private MethodSpec defaultProviderMethod() { .build(); } + private MethodSpec defaultPreferredProviderMethod() { + return MethodSpec.methodBuilder("defaultProvider") + .addModifiers(Modifier.PUBLIC, Modifier.STATIC) + .addParameter(ParameterizedTypeName.get(List.class, String.class), "authSchemePreference") + .returns(className()) + .addJavadoc("Get the default auth scheme provider with the preferred auth schemes in order of preference.") + .addStatement("return new $T(defaultProvider(), authSchemePreference)", + authSchemeSpecUtils.preferredAuthSchemeProviderName()) + .build(); + } + private CodeBlock interfaceJavadoc() { CodeBlock.Builder b = CodeBlock.builder(); @@ -105,3 +118,4 @@ private CodeBlock interfaceJavadoc() { return b.build(); } } + diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java index a02f3e8bc893..f6ea9e684b59 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecUtils.java @@ -97,6 +97,15 @@ public ClassName modeledAuthSchemeProviderName() { return ClassName.get(internalPackage(), "Modeled" + providerInterfaceName().simpleName()); } + public ClassName preferredAuthSchemeProviderName() { + return ClassName.get(internalPackage(), "Preferred" + providerInterfaceName().simpleName()); + } + + public ClassName authSchemeProviderBuilderName() { + return ClassName.get(basePackage(), + intermediateModel.getMetadata().getServiceName() + "AuthSchemeProviderBuilder"); + } + public ClassName authSchemeInterceptor() { return ClassName.get(internalPackage(), intermediateModel.getMetadata().getServiceName() + "AuthSchemeInterceptor"); } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java new file mode 100644 index 000000000000..0481d707d999 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/auth/scheme/PreferredAuthSchemeProviderSpec.java @@ -0,0 +1,108 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.auth.scheme; + +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeSpec; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetUtils; +import software.amazon.awssdk.utils.CollectionUtils; + +public class PreferredAuthSchemeProviderSpec implements ClassSpec { + private final AuthSchemeSpecUtils authSchemeSpecUtils; + + public PreferredAuthSchemeProviderSpec(IntermediateModel intermediateModel) { + this.authSchemeSpecUtils = new AuthSchemeSpecUtils(intermediateModel); + } + + @Override + public ClassName className() { + return authSchemeSpecUtils.preferredAuthSchemeProviderName(); + } + + @Override + public TypeSpec poetSpec() { + return PoetUtils.createClassBuilder(className()) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .addAnnotation(SdkInternalApi.class) + .addField( + authSchemeSpecUtils.providerInterfaceName(), "delegate", + Modifier.PRIVATE, Modifier.FINAL) + .addField( + ParameterizedTypeName.get(List.class, String.class), "authSchemePreference", + Modifier.PRIVATE, Modifier.FINAL) + .addSuperinterface(authSchemeSpecUtils.providerInterfaceName()) + .addMethod(constructor()) + .addMethod(resolveAuthSchemeMethod()) + .build(); + } + + private MethodSpec constructor() { + return MethodSpec + .constructorBuilder() + .addModifiers(Modifier.PUBLIC) + 
.addParameter(authSchemeSpecUtils.providerInterfaceName(), "delegate") + .addParameter(ParameterizedTypeName.get(List.class, String.class), "authSchemePreference") + .addStatement("this.delegate = delegate") + .addStatement("this.authSchemePreference = authSchemePreference != null ? authSchemePreference " + + ": $T.emptyList()", + Collections.class) + .build(); + } + + private MethodSpec resolveAuthSchemeMethod() { + MethodSpec.Builder b = MethodSpec.methodBuilder("resolveAuthScheme") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(authSchemeSpecUtils.resolverReturnType()) + .addParameter(authSchemeSpecUtils.parametersInterfaceName(), "params"); + b.addJavadoc("Resolve the auth schemes based on the given set of parameters."); + b.addStatement("$T candidateAuthSchemes = delegate.resolveAuthScheme(params)", + authSchemeSpecUtils.resolverReturnType()); + b.beginControlFlow("if ($T.isNullOrEmpty(authSchemePreference))", CollectionUtils.class) + .addStatement("return candidateAuthSchemes") + .endControlFlow(); + + b.addStatement("$T authSchemes = new $T<>()", authSchemeSpecUtils.resolverReturnType(), ArrayList.class); + + b.beginControlFlow("authSchemePreference.forEach(preferredSchemeId -> "); + + b.beginControlFlow("candidateAuthSchemes.stream().filter(candidate -> "); + b.addStatement("String candidateSchemeName = candidate.schemeId().contains(\"#\") ? 
" + + "candidate.schemeId().split(\"#\")[1] : candidate.schemeId()"); + b.addStatement("return candidateSchemeName.equals(preferredSchemeId)"); + b.endControlFlow(").findFirst().ifPresent(authSchemes::add)"); + b.endControlFlow(")"); + + b.beginControlFlow("candidateAuthSchemes.forEach(candidate -> ") + .beginControlFlow("if (!authSchemes.contains(candidate))") + .addStatement("authSchemes.add(candidate)") + .endControlFlow() + .endControlFlow(")"); + + b.addStatement("return authSchemes"); + return b.build(); + } + +} \ No newline at end of file diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java index b57d7cb8d7aa..96d95f3650f8 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClass.java @@ -41,8 +41,10 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; import software.amazon.awssdk.auth.token.signer.aws.BearerTokenSigner; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -52,6 +54,7 @@ import software.amazon.awssdk.codegen.model.service.AuthType; import software.amazon.awssdk.codegen.model.service.ClientContextParam; import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetExtension; import 
software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; import software.amazon.awssdk.codegen.poet.auth.scheme.ModelAuthSchemeClassesKnowledgeIndex; @@ -70,7 +73,9 @@ import software.amazon.awssdk.core.client.config.SdkClientOption; import software.amazon.awssdk.core.endpointdiscovery.providers.DefaultEndpointDiscoveryProviderChain; import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.Protocol; @@ -101,6 +106,8 @@ public class BaseClientBuilderClass implements ClassSpec { private final AuthSchemeSpecUtils authSchemeSpecUtils; private final ServiceClientConfigurationUtils configurationUtils; private final EndpointParamsKnowledgeIndex endpointParamsKnowledgeIndex; + private final PoetExtension poetExtensions; + public BaseClientBuilderClass(IntermediateModel model) { this.model = model; @@ -111,6 +118,7 @@ public BaseClientBuilderClass(IntermediateModel model) { this.authSchemeSpecUtils = new AuthSchemeSpecUtils(model); this.configurationUtils = new ServiceClientConfigurationUtils(model); this.endpointParamsKnowledgeIndex = EndpointParamsKnowledgeIndex.of(model); + this.poetExtensions = new PoetExtension(model); } @Override @@ -265,24 +273,24 @@ private MethodSpec serviceNameMethod() { } private MethodSpec mergeServiceDefaultsMethod() { - boolean crc32FromCompressedDataEnabled = model.getCustomizationConfig().isCalculateCrc32FromCompressedData(); - MethodSpec.Builder builder = MethodSpec.methodBuilder("mergeServiceDefaults") .addAnnotation(Override.class) .addModifiers(PROTECTED, FINAL) .returns(SdkClientConfiguration.class) - 
.addParameter(SdkClientConfiguration.class, "config") - .addCode("return config.merge(c -> c"); + .addParameter(SdkClientConfiguration.class, "config"); - builder.addCode(".option($T.ENDPOINT_PROVIDER, defaultEndpointProvider())", SdkClientOption.class); + boolean crc32FromCompressedDataEnabled = model.getCustomizationConfig().isCalculateCrc32FromCompressedData(); + + builder.beginControlFlow("return config.merge(c -> "); + builder.addCode("c.option($T.ENDPOINT_PROVIDER, defaultEndpointProvider())", SdkClientOption.class); if (authSchemeSpecUtils.useSraAuth()) { - builder.addCode(".option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider())", SdkClientOption.class); - builder.addCode(".option($T.AUTH_SCHEMES, authSchemes())", SdkClientOption.class); - } else { - if (defaultAwsAuthSignerMethod().isPresent()) { - builder.addCode(".option($T.SIGNER, defaultSigner())\n", SdkAdvancedClientOption.class); + if (!model.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + builder.addCode(".option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config))", SdkClientOption.class); } + builder.addCode(".option($T.AUTH_SCHEMES, authSchemes())", SdkClientOption.class); + } else if (defaultAwsAuthSignerMethod().isPresent()) { + builder.addCode(".option($T.SIGNER, defaultSigner())\n", SdkAdvancedClientOption.class); } builder.addCode(".option($T.CRC32_FROM_COMPRESSED_DATA_ENABLED, $L)\n", SdkClientOption.class, crc32FromCompressedDataEnabled); @@ -301,11 +309,47 @@ private MethodSpec mergeServiceDefaultsMethod() { builder.addCode(".option($T.TOKEN_SIGNER, defaultTokenSigner())", SdkAdvancedClientOption.class); } } + builder.addStatement(""); - builder.addCode(");"); + if (model.getCustomizationConfig().isEnableEnvironmentBearerToken()) { + configureEnvironmentBearerToken(builder); + } + builder.endControlFlow(")"); return builder.build(); } + private void configureEnvironmentBearerToken(MethodSpec.Builder builder) { + if (!authSchemeSpecUtils.useSraAuth()) { + throw 
new IllegalStateException("The enableEnvironmentBearerToken customization requires SRA Auth."); + } + if (!AuthUtils.usesBearerAuth(model)) { + throw new IllegalStateException("The enableEnvironmentBearerToken customization requires the service to model and " + + "support smithy.api#httpBearerAuth."); + } + + builder.addStatement("$T tokenFromEnv = new $T().getStringValue()", + ParameterizedTypeName.get(Optional.class, String.class), + poetExtensions.getEnvironmentTokenSystemSettingsClass()); + + builder + .beginControlFlow("if (tokenFromEnv.isPresent() && config.option($T.AUTH_SCHEME_PROVIDER) == null && config.option($T" + + ".TOKEN_IDENTITY_PROVIDER) == null)", + SdkClientOption.class, AwsClientOption.class) + .addStatement("c.option($T.AUTH_SCHEME_PROVIDER, $T.defaultProvider($T.singletonList($S)))", + SdkClientOption.class, authSchemeSpecUtils.providerInterfaceName(), Collections.class, + "httpBearerAuth") + .addStatement("c.option($T.TOKEN_IDENTITY_PROVIDER, $T.create(tokenFromEnv::get))", + AwsClientOption.class, StaticTokenProvider.class) + .addStatement("c.option($T.EXECUTION_ATTRIBUTES, " + + "$T.builder().put($T.TOKEN_CONFIGURED_FROM_ENV, tokenFromEnv.get()).build())", + SdkClientOption.class, ExecutionAttributes.class, SdkInternalExecutionAttribute.class) + .endControlFlow() + .beginControlFlow("else") + .addStatement("c.option($T.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config))", SdkClientOption.class) + .endControlFlow(); + + } + private Optional mergeInternalDefaultsMethod() { String userAgent = model.getCustomizationConfig().getUserAgent(); RetryMode defaultRetryMode = model.getCustomizationConfig().getDefaultRetryMode(); @@ -442,7 +486,7 @@ private MethodSpec finalizeServiceConfigurationMethod() { // serviceConfigBuilder; the service configuration classes (e.g. S3Configuration) return primitive booleans that // have a default when not present. 
builder.addStatement("builder.option($T.DUALSTACK_ENDPOINT_ENABLED, serviceConfigBuilder.dualstackEnabled())", - AwsClientOption.class); + AwsClientOption.class); } if (model.getCustomizationConfig().getServiceConfig().hasFipsProperty()) { @@ -452,14 +496,14 @@ private MethodSpec finalizeServiceConfigurationMethod() { if (model.getEndpointOperation().isPresent()) { builder.addStatement("builder.option($T.ENDPOINT_DISCOVERY_ENABLED, endpointDiscoveryEnabled)\n", - SdkClientOption.class); + SdkClientOption.class); } if (StringUtils.isNotBlank(model.getCustomizationConfig().getCustomRetryStrategy())) { builder.addStatement("builder.option($1T.RETRY_STRATEGY, $2T.resolveRetryStrategy(config))", - SdkClientOption.class, - PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryStrategy())); + SdkClientOption.class, + PoetUtils.classNameFromFqcn(model.getCustomizationConfig().getCustomRetryStrategy())); } if (StringUtils.isNotBlank(model.getCustomizationConfig().getCustomRetryPolicy())) { @@ -485,7 +529,7 @@ private MethodSpec finalizeServiceConfigurationMethod() { if (endpointParamsKnowledgeIndex.hasAccountIdEndpointModeBuiltIn()) { builder.addStatement("builder.option($T.$L, resolveAccountIdEndpointMode(config))", - AwsClientOption.class, model.getNamingStrategy().getEnumValueName("accountIdEndpointMode")); + AwsClientOption.class, model.getNamingStrategy().getEnumValueName("accountIdEndpointMode")); } String serviceNameForEnvVar = model.getNamingStrategy().getServiceNameForEnvironmentVariables(); @@ -829,7 +873,19 @@ private MethodSpec sigv4aSigningRegionSetMethod() { private MethodSpec defaultAuthSchemeProviderMethod() { return MethodSpec.methodBuilder("defaultAuthSchemeProvider") .addModifiers(PRIVATE) + .addParameter(SdkClientConfiguration.class, "config") .returns(authSchemeSpecUtils.providerInterfaceName()) + .addCode("$T authSchemePreferenceProvider = " + + "$T.builder()", + AuthSchemePreferenceResolver.class, AuthSchemePreferenceResolver.class) 
+ .addCode(".profileFile(config.option($T.PROFILE_FILE_SUPPLIER))", SdkClientOption.class) + .addCode(".profileName(config.option($T.PROFILE_NAME))", SdkClientOption.class) + .addStatement(".build()") + .addStatement("List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference()") + .beginControlFlow("if(!preferences.isEmpty())") + .addStatement("return $T.defaultProvider(preferences)", + authSchemeSpecUtils.providerInterfaceName()) + .endControlFlow() .addStatement("return $T.defaultProvider()", authSchemeSpecUtils.providerInterfaceName()) .build(); } @@ -965,10 +1021,10 @@ private MethodSpec internalPluginsMethod() { List internalPlugins = model.getCustomizationConfig().getInternalPlugins(); if (internalPlugins.isEmpty()) { return builder.addStatement("return $T.emptyList()", Collections.class) - .build(); + .build(); } - builder.addStatement("$T internalPlugins = new $T<>()", parameterizedTypeName, ArrayList.class); + builder.addStatement("$T internalPlugins = new $T<>()", parameterizedTypeName, ArrayList.class); for (String internalPlugin : internalPlugins) { String arguments = internalPluginNewArguments(internalPlugin); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java index aaa729662c17..9829133551c3 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/AsyncClientClass.java @@ -26,6 +26,7 @@ import static software.amazon.awssdk.codegen.internal.Constant.EVENT_PUBLISHER_PARAM_NAME; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.addS3ArnableFieldCode; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applySignerOverrideMethod; +import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.transformServiceId; import static 
software.amazon.awssdk.codegen.poet.client.SyncClientClass.addRequestModifierCode; import static software.amazon.awssdk.codegen.poet.client.SyncClientClass.getProtocolSpecs; @@ -229,7 +230,14 @@ private MethodSpec constructor(TypeSpec.Builder classBuilder) { .addStatement("this.clientHandler = new $T(clientConfiguration)", AwsAsyncClientHandler.class) .addStatement("this.clientConfiguration = clientConfiguration.toBuilder()" + ".option($T.SDK_CLIENT, this)" - + ".build()", SdkClientOption.class); + + ".option($T.API_METADATA, $S + \"#\" + $T.VERSION)" + + ".build()", + SdkClientOption.class, + SdkClientOption.class, + transformServiceId(model.getMetadata().getServiceId()), + ClassName.get(model.getMetadata().getFullClientInternalPackageName(), + "ServiceVersionInfo")); + FieldSpec protocolFactoryField = protocolSpec.protocolFactory(model); if (model.getMetadata().isJsonProtocol()) { builder.addStatement("this.$N = init($T.builder()).build()", protocolFactoryField.name, diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientClassUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientClassUtils.java index 87e8339e54a5..c058c1d7b654 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientClassUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/ClientClassUtils.java @@ -280,4 +280,9 @@ public static MethodSpec updateRetryStrategyClientConfigurationMethod() { builder.addStatement("configuration.option($T.CONFIGURED_RETRY_CONFIGURATOR, null)", SdkClientOption.class); return builder.build(); } + + // According to User Agent 2.0 spec, replace spaces with underscores + static String transformServiceId(String serviceId) { + return serviceId.replace(" ", "_"); + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java 
b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java new file mode 100644 index 000000000000..3ca3fb56ab41 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClass.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client; + +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.TypeSpec; +import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.naming.NamingStrategy; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetExtension; +import software.amazon.awssdk.codegen.poet.PoetUtils; +import software.amazon.awssdk.utils.SystemSetting; + +public class EnvironmentTokenSystemSettingsClass implements ClassSpec { + protected final IntermediateModel model; + protected final PoetExtension poetExtensions; + + public EnvironmentTokenSystemSettingsClass(IntermediateModel model) { + this.model = model; + this.poetExtensions = new PoetExtension(model); + } + + @Override + public TypeSpec poetSpec() { + NamingStrategy namingStrategy = model.getNamingStrategy(); + + String systemPropertyName = 
"aws.bearerToken" + namingStrategy.getSigningNameForSystemProperties(); + String envName = "AWS_BEARER_TOKEN_" + namingStrategy.getSigningNameForEnvironmentVariables(); + + return TypeSpec.classBuilder(className()) + .addModifiers(Modifier.PUBLIC) + .addAnnotation(PoetUtils.generatedAnnotation()) + .addAnnotation(SdkInternalApi.class) + .addSuperinterface(SystemSetting.class) + .addMethod(MethodSpec.methodBuilder("property") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return $S", systemPropertyName) + .build()) + .addMethod(MethodSpec.methodBuilder("environmentVariable") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return $S", envName) + .build()) + .addMethod(MethodSpec.methodBuilder("defaultValue") + .addAnnotation(Override.class) + .addModifiers(Modifier.PUBLIC) + .returns(String.class) + .addStatement("return null") + .build()) + .build(); + } + + @Override + public ClassName className() { + return poetExtensions.getEnvironmentTokenSystemSettingsClass(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java index 2695e43f8746..780d15a8298b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/SyncClientClass.java @@ -23,6 +23,7 @@ import static software.amazon.awssdk.codegen.poet.PoetUtils.classNameFromFqcn; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.addS3ArnableFieldCode; import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.applySignerOverrideMethod; +import static software.amazon.awssdk.codegen.poet.client.ClientClassUtils.transformServiceId; import com.squareup.javapoet.ClassName; import com.squareup.javapoet.CodeBlock; @@ -201,7 
+202,13 @@ private MethodSpec constructor() { .addStatement("this.clientHandler = new $T(clientConfiguration)", protocolSpec.getClientHandlerClass()) .addStatement("this.clientConfiguration = clientConfiguration.toBuilder()" + ".option($T.SDK_CLIENT, this)" - + ".build()", SdkClientOption.class); + + ".option($T.API_METADATA, $S + \"#\" + $T.VERSION)" + + ".build()", + SdkClientOption.class, + SdkClientOption.class, + transformServiceId(model.getMetadata().getServiceId()), + ClassName.get(model.getMetadata().getFullClientInternalPackageName(), + "ServiceVersionInfo")); FieldSpec protocolFactoryField = protocolSpec.protocolFactory(model); if (model.getMetadata().isJsonProtocol()) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java index 9fa214efb8fe..8e1afb25e8e0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/JsonProtocolSpec.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.intermediate.Protocol; import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeType; import software.amazon.awssdk.codegen.poet.PoetExtension; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; import software.amazon.awssdk.codegen.poet.client.traits.HttpChecksumRequiredTrait; @@ -116,7 +117,6 @@ public MethodSpec initProtocolFactory(IntermediateModel model) { methodSpec.addCode("$L", hasAwsQueryCompatible()); } - registerModeledExceptions(model, poetExtensions).forEach(methodSpec::addCode); methodSpec.addCode(";"); return methodSpec.build(); @@ -170,11 +170,40 @@ public CodeBlock responseHandler(IntermediateModel model, 
OperationModel opModel public Optional errorResponseHandler(OperationModel opModel) { String protocolFactory = protocolFactoryLiteral(model, opModel); - return Optional.of( - CodeBlock.builder() - .add("\n\n$T<$T> errorResponseHandler = createErrorResponseHandler($L, operationMetadata);", - HttpResponseHandler.class, AwsServiceException.class, protocolFactory) - .build()); + CodeBlock.Builder builder = CodeBlock.builder(); + ParameterizedTypeName metadataMapperType = ParameterizedTypeName.get( + ClassName.get(Function.class), + ClassName.get(String.class), + ParameterizedTypeName.get(Optional.class, ExceptionMetadata.class)); + + builder.add("\n$T exceptionMetadataMapper = errorCode -> {\n", metadataMapperType); + builder.add("if (errorCode == null) {\n"); + builder.add("return $T.empty();\n", Optional.class); + builder.add("}\n"); + builder.add("switch (errorCode) {\n"); + model.getShapes().values().stream() + .filter(shape -> shape.getShapeType() == ShapeType.Exception) + .forEach(exceptionShape -> { + String exceptionName = exceptionShape.getShapeName(); + String errorCode = exceptionShape.getErrorCode(); + + builder.add("case $S:\n", errorCode); + builder.add("return $T.of($T.builder()\n", Optional.class, ExceptionMetadata.class) + .add(".errorCode($S)\n", errorCode); + builder.add(populateHttpStatusCode(exceptionShape, model)); + builder.add(".exceptionBuilderSupplier($T::builder)\n", + poetExtensions.getModelClassFromShape(exceptionShape)) + .add(".build());\n"); + }); + + builder.add("default: return $T.empty();\n", Optional.class); + builder.add("}\n"); + builder.add("};\n"); + + builder.add("$T<$T> errorResponseHandler = createErrorResponseHandler($L, operationMetadata, exceptionMetadataMapper);", + HttpResponseHandler.class, AwsServiceException.class, protocolFactory); + + return Optional.of(builder.build()); } @Override @@ -206,6 +235,10 @@ public CodeBlock executionHandler(OperationModel opModel) { codeBlock.add(RequestCompressionTrait.create(opModel, 
model)); + if (opModel.hasStreamingOutput()) { + codeBlock.add(".withResponseTransformer(responseTransformer)"); + } + if (opModel.hasStreamingInput()) { codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L)", syncStreamingMarshaller(model, opModel, marshaller)); @@ -281,6 +314,10 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper builder.add(NoneAuthTypeRequestTrait.create(opModel)); } + if (opModel.hasStreamingOutput()) { + builder.add(".withAsyncResponseTransformer(asyncResponseTransformer)"); + } + builder.add(RequestCompressionTrait.create(opModel, model)) .add(".withInput($L)$L)", opModel.getInput().getVariableName(), asyncResponseTransformerVariable(isStreaming, isRestJson, opModel)) @@ -411,21 +448,6 @@ public Optional createErrorResponseHandler() { ClassName httpResponseHandler = ClassName.get(HttpResponseHandler.class); ClassName sdkBaseException = ClassName.get(AwsServiceException.class); TypeName responseHandlerOfException = ParameterizedTypeName.get(httpResponseHandler, sdkBaseException); - - return Optional.of(MethodSpec.methodBuilder("createErrorResponseHandler") - .addParameter(BaseAwsJsonProtocolFactory.class, "protocolFactory") - .addParameter(JsonOperationMetadata.class, "operationMetadata") - .returns(responseHandlerOfException) - .addModifiers(Modifier.PRIVATE) - .addStatement("return protocolFactory.createErrorResponseHandler(operationMetadata)") - .build()); - } - - @Override - public Optional createEventstreamErrorResponseHandler() { - ClassName httpResponseHandler = ClassName.get(HttpResponseHandler.class); - ClassName sdkBaseException = ClassName.get(AwsServiceException.class); - TypeName responseHandlerOfException = ParameterizedTypeName.get(httpResponseHandler, sdkBaseException); ParameterizedTypeName mapperType = ParameterizedTypeName.get(ClassName.get(Function.class), ClassName.get(String.class), ParameterizedTypeName.get(Optional.class, ExceptionMetadata.class)); diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java index b6cca23e38cf..4bf7d45d5f9c 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/QueryProtocolSpec.java @@ -128,7 +128,9 @@ public CodeBlock executionHandler(OperationModel opModel) { } codeBlock.add(RequestCompressionTrait.create(opModel, intermediateModel)); - + if (opModel.hasStreamingOutput()) { + codeBlock.add(".withResponseTransformer(responseTransformer)"); + } if (opModel.hasStreamingInput()) { return codeBlock.add(".withRequestBody(requestBody)") .add(".withMarshaller($L));", syncStreamingMarshaller(intermediateModel, opModel, marshaller)) @@ -170,6 +172,10 @@ public CodeBlock asyncExecutionHandler(IntermediateModel intermediateModel, Oper builder.add(RequestCompressionTrait.create(opModel, intermediateModel)); + if (opModel.hasStreamingOutput()) { + builder.add(".withAsyncResponseTransformer(asyncResponseTransformer)"); + } + builder.add(hostPrefixExpression(opModel) + asyncRequestBody + ".withInput($L)$L);", opModel.getInput().getVariableName(), opModel.hasStreamingOutput() ? ", asyncResponseTransformer" : ""); diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ServiceVersionInfoSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ServiceVersionInfoSpec.java new file mode 100644 index 000000000000..145e482ad899 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/client/specs/ServiceVersionInfoSpec.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.client.specs; + +import static software.amazon.awssdk.core.util.VersionInfo.SDK_VERSION; + +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.FieldSpec; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.TypeSpec; +import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.PoetExtension; +import software.amazon.awssdk.codegen.poet.PoetUtils; + +public class ServiceVersionInfoSpec implements ClassSpec { + private final PoetExtension poetExtension; + + public ServiceVersionInfoSpec(IntermediateModel model) { + this.poetExtension = new PoetExtension(model); + } + + @Override + public TypeSpec poetSpec() { + TypeSpec.Builder builder = TypeSpec.classBuilder("ServiceVersionInfo") + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .addAnnotation(PoetUtils.generatedAnnotation()) + .addAnnotation(SdkInternalApi.class) + .addField(FieldSpec.builder( + String.class, "VERSION", Modifier.PUBLIC, Modifier.STATIC, Modifier.FINAL) + .initializer("$S", SDK_VERSION) + .addJavadoc("Returns the current version for the AWS SDK in which" + + " this class is running.") + .build()) + .addMethod(privateConstructor()); + + return builder.build(); + } + + protected MethodSpec privateConstructor() { + return MethodSpec.constructorBuilder() + .addModifiers(Modifier.PRIVATE) + .build(); + } + + @Override + public ClassName 
className() { + return poetExtension.getServiceVersionInfoClass(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java index aa61dea0a16a..2573d2c0680d 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ModelBuilderSpecs.java @@ -35,6 +35,8 @@ import java.util.Set; import java.util.function.Consumer; import javax.lang.model.element.Modifier; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.MemberModel; @@ -83,6 +85,8 @@ public ClassName builderImplName() { public TypeSpec builderInterface() { TypeSpec.Builder builder = TypeSpec.interfaceBuilder(builderInterfaceName()) .addSuperinterfaces(builderSuperInterfaces()) + .addAnnotation(Mutable.class) + .addAnnotation(NotThreadSafe.class) .addModifiers(PUBLIC); shapeModel.getNonStreamingMembers() diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java index 22f06d253b56..320506822765 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/model/ServiceClientConfigurationUtils.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.poet.auth.scheme.AuthSchemeSpecUtils; import software.amazon.awssdk.codegen.poet.rules.EndpointRulesSpecUtils; +import 
software.amazon.awssdk.codegen.utils.AuthUtils; import software.amazon.awssdk.core.ClientEndpointProvider; import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; import software.amazon.awssdk.core.checksums.ResponseChecksumValidation; @@ -48,6 +49,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Validate; @@ -105,10 +107,34 @@ private List fields(IntermediateModel model) { authSchemeProviderField() )); fields.addAll(addCustomClientParams(model)); + fields.addAll(addModeledIdentityProviders(model)); fields.addAll(addCustomClientConfigParams(model)); return fields; } + private List addModeledIdentityProviders(IntermediateModel model) { + List identityProviderFields = new ArrayList<>(); + if (AuthUtils.usesBearerAuth(model)) { + identityProviderFields.add(tokenIdentityProviderField()); + } + return identityProviderFields; + } + + private Field tokenIdentityProviderField() { + TypeName tokenIdentityProviderType = + ParameterizedTypeName.get(ClassName.get(IdentityProvider.class), + WildcardTypeName.subtypeOf(TokenIdentity.class)); + + return fieldBuilder("tokenProvider", tokenIdentityProviderType) + .doc("token provider") + .isInherited(false) + .localSetter(basicLocalSetterCode("tokenProvider")) + .localGetter(basicLocalGetterCode("tokenProvider")) + .configSetter(basicConfigSetterCode(AwsClientOption.TOKEN_IDENTITY_PROVIDER, "tokenProvider")) + .configGetter(basicConfigGetterCode(AwsClientOption.TOKEN_IDENTITY_PROVIDER)) + .build(); + } + private List addCustomClientParams(IntermediateModel model) { List customClientParamFields = new ArrayList<>(); diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java index ce7adb6066ee..d077473f532a 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesClientTestSpec.java @@ -61,6 +61,10 @@ import software.amazon.awssdk.codegen.poet.PoetExtension; import software.amazon.awssdk.codegen.poet.PoetUtils; import software.amazon.awssdk.codegen.utils.AuthUtils; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ValidationEntry; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; +import software.amazon.awssdk.codegen.validation.ValidationErrorSeverity; import software.amazon.awssdk.core.SdkSystemSetting; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.rules.testing.AsyncTestCase; @@ -445,6 +449,20 @@ private CodeBlock requestCreation(OperationModel opModel, Map if (opParams != null) { opParams.forEach((n, v) -> { MemberModel memberModel = opModel.getInputShape().getMemberByC2jName(n); + + if (memberModel == null) { + String detailMsg = String.format("Endpoint test definition references member '%s' on the input shape '%s' " + + "but no such member is defined.", n, opModel.getInputShape().getC2jName()); + ValidationEntry entry = + new ValidationEntry() + .withSeverity(ValidationErrorSeverity.DANGER) + .withErrorId(ValidationErrorId.UNKNOWN_SHAPE_MEMBER) + .withDetailMessage(detailMsg); + + throw ModelInvalidException.builder() + .validationEntries(Collections.singletonList(entry)) + .build(); + } CodeBlock memberValue = createMemberValue(memberModel, v); b.add(".$N($L)", memberModel.getFluentSetterMethodName(), memberValue); }); diff --git 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java index bad36fe6594f..dfcf68b056fd 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules/EndpointRulesSpecUtils.java @@ -29,7 +29,12 @@ import com.squareup.javapoet.TypeName; import java.io.IOException; import java.io.UncheckedIOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -52,6 +57,7 @@ import software.amazon.awssdk.utils.internal.CodegenNamingUtils; public class EndpointRulesSpecUtils { + private static final String RULES_ENGINE_RESOURCE_FILES_PREFIX = "software/amazon/awssdk/codegen/rules/"; private final IntermediateModel intermediateModel; public EndpointRulesSpecUtils(IntermediateModel intermediateModel) { @@ -213,16 +219,45 @@ public TypeName resolverReturnType() { public List rulesEngineResourceFiles() { URL currentJarUrl = EndpointRulesSpecUtils.class.getProtectionDomain().getCodeSource().getLocation(); + + // This would happen if the classes aren't loaded from a JAR, e.g. 
when unit testing + if (!currentJarUrl.toString().endsWith(".jar")) { + return rulesEngineFilesFromDirectory(currentJarUrl); + } + try (JarFile jarFile = new JarFile(currentJarUrl.getFile())) { return jarFile.stream() .map(ZipEntry::getName) - .filter(e -> e.startsWith("software/amazon/awssdk/codegen/rules/")) + .filter(e -> e.startsWith(RULES_ENGINE_RESOURCE_FILES_PREFIX)) .collect(Collectors.toList()); } catch (IOException e) { throw new UncheckedIOException(e); } } + public List rulesEngineFilesFromDirectory(URL location) { + URI locationUri; + try { + locationUri = location.toURI(); + if (!"file".equals(locationUri.getScheme())) { + throw new RuntimeException("Expected location to be a directory"); + } + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } + + try { + Path directory = Paths.get(locationUri); + return Files.walk(directory) + // Remove the root directory if the classes, paths are expected to be relative to this directory + .map(f -> directory.relativize(f).toString()) + .filter(f -> f.startsWith(RULES_ENGINE_RESOURCE_FILES_PREFIX)) + .collect(Collectors.toList()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + public List rulesEngineResourceFiles2() { URL currentJarUrl = EndpointRulesSpecUtils.class.getProtectionDomain().getCodeSource().getLocation(); try (JarFile jarFile = new JarFile(currentJarUrl.getFile())) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java index 4cd94ace20ad..72f8b28a1fa2 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodeGeneratorVisitor.java @@ -21,26 +21,38 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; import software.amazon.awssdk.awscore.endpoints.AwsEndpointAttribute; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4AuthScheme; import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4aAuthScheme; import software.amazon.awssdk.codegen.model.config.customization.KeyTypePair; import software.amazon.awssdk.endpoints.Endpoint; +import software.amazon.awssdk.utils.uri.SdkUri; public class CodeGeneratorVisitor extends WalkRuleExpressionVisitor { + private static final Logger log = LoggerFactory.getLogger(CodeGeneratorVisitor.class); + private final CodeBlock.Builder builder; private final RuleRuntimeTypeMirror typeMirror; private final SymbolTable symbolTable; private final Map knownEndpointAttributes; + private final Map ruleIdToScope; + private final boolean endpointCaching; public CodeGeneratorVisitor(RuleRuntimeTypeMirror typeMirror, SymbolTable symbolTable, Map knownEndpointAttributes, + Map ruleIdToScope, + boolean endpointCaching, CodeBlock.Builder builder) { this.builder = builder; this.symbolTable = symbolTable; this.knownEndpointAttributes = knownEndpointAttributes; + this.ruleIdToScope = ruleIdToScope; this.typeMirror = typeMirror; + this.endpointCaching = endpointCaching; } @Override @@ -196,28 +208,14 @@ public Void visitRuleSetExpression(RuleSetExpression e) { @Override public Void visitLetExpression(LetExpression expr) { - for (String key : expr.bindings().keySet()) { - RuleType type = symbolTable.locals().get(key); - builder.addStatement("$T $L = null", type.javaType(), key); - } - - int count = 0; for (Map.Entry kvp : expr.bindings().entrySet()) { String k = kvp.getKey(); RuleExpression v = kvp.getValue(); - builder.add("if ("); - builder.add("($L = ", k); + RuleType type = symbolTable.locals().get(k); + builder.add("$T $L = ", type.javaType(), k); v.accept(this); - builder.add(") != null"); - - builder.beginControlFlow(")"); - builder.addStatement("locals = locals.toBuilder().$1L($1L).build()", k); - - 
if (++count < expr.bindings().size()) { - builder.nextControlFlow("else"); - builder.addStatement("return RuleResult.carryOn()"); - builder.endControlFlow(); - } + builder.addStatement(""); + builder.beginControlFlow("if ($L != null)", k); } return null; } @@ -235,46 +233,111 @@ private void conditionsPreamble(RuleSetExpression expr) { } private void conditionsEpilogue(RuleSetExpression expr) { - int blocksToClose = expr.conditions().size(); - for (int idx = 0; idx < blocksToClose; ++idx) { - builder.endControlFlow(); + for (RuleExpression condition : expr.conditions()) { + if (condition.kind() == RuleExpression.RuleExpressionKind.LET) { + LetExpression let = (LetExpression) condition; + for (int x = 0; x < let.bindings().size(); x++) { + builder.endControlFlow(); + } + } else { + builder.endControlFlow(); + } } - if (!expr.conditions().isEmpty()) { + if (needsReturn(expr)) { builder.addStatement("return $T.carryOn()", typeMirror.rulesResult().type()); } } + private boolean needsReturn(RuleSetExpression expr) { + // If the expression can be inlined, then it doesn't live in + // its own method, no return at the end required + if (canBeInlined(expr)) { + return false; + } + // If the expression has conditions all be be wrapped in + // if-blocks, thus at the end of the method we need to return + // carryOn() + if (!expr.conditions().isEmpty()) { + return true; + } + // If the expression doesn't have any conditions, and doesn't + // have any children then we need to return carryOn(). This + // case SHOULD NOT happen but we assume below that there are + // children, thus adding the test here. + if (expr.children().isEmpty()) { + return true; + } + // We have children, check the last one. + int size = expr.children().size(); + RuleSetExpression child = expr.children().get(size - 1); + // If a tree then we don't need a return. + if (child.isTree()) { + return false; + } + // The child is not a tree, so it was inlined. 
Check if it + // does have any conditions, if it so, its body will be inside + // a block already so we need to return after it. + return !child.conditions().isEmpty(); + } + private void codegenTreeBody(RuleSetExpression expr) { List children = expr.children(); int size = children.size(); + boolean isFirst = true; for (int idx = 0; idx < size; ++idx) { RuleSetExpression child = children.get(idx); + if (canBeInlined(child)) { + child.accept(this); + continue; + } boolean isLast = idx == size - 1; if (isLast) { - builder.addStatement("return $L(params, locals)", - child.ruleId()); + builder.addStatement("return $L($L)", + child.ruleId(), + callParams(child.ruleId())); continue; } - boolean isFirst = idx == 0; + if (isFirst) { - builder.addStatement("$T result = $L(params, locals)", + isFirst = false; + builder.addStatement("$T result = $L($L)", typeMirror.rulesResult().type(), - child.ruleId()); + child.ruleId(), + callParams(child.ruleId())); } else { - builder.addStatement("result = $L(params, locals)", - child.ruleId()); + builder.addStatement("result = $L($L)", + child.ruleId(), + callParams(child.ruleId())); } builder.beginControlFlow("if (result.isResolved())") .addStatement("return result") .endControlFlow(); } + } + private boolean canBeInlined(RuleSetExpression child) { + return !child.isTree(); + } + + private String callParams(String ruleId) { + ComputeScopeTree.Scope scope = ruleIdToScope.get(ruleId); + String args = scope.usesLocals().stream() + .filter(a -> !scope.defines().contains(a)) + .collect(Collectors.joining(", ")); + if (args.isEmpty()) { + return "params"; + } + return "params, " + args; } @Override public Void visitEndpointExpression(EndpointExpression e) { builder.add("return $T.endpoint(", typeMirror.rulesResult().type()); - builder.add("$T.builder().url($T.create(", Endpoint.class, URI.class); + if (endpointCaching) { + builder.add("$T.builder().url($T.getInstance().create(", Endpoint.class, SdkUri.class); + } else { + 
builder.add("$T.builder().url($T.create(", Endpoint.class, URI.class); + } e.url().accept(this); builder.add("))"); e.headers().accept(this); @@ -293,7 +356,7 @@ public Void visitPropertiesExpression(PropertiesExpression e) { } else if (knownEndpointAttributes.containsKey(k)) { addAttributeBlock(k, v); } else { - throw new RuntimeException("unknown endpoint property: " + k); + log.warn("Ignoring unknown endpoint property: {}", k); } }); return null; @@ -381,7 +444,6 @@ private void addAttributeBlock(String k, RuleExpression v) { builder.add(")"); } - public CodeBlock.Builder builder() { return builder; } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java index 57aa63980d2b..6488f015ad7b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/CodegenExpressionBuidler.java @@ -21,10 +21,16 @@ public final class CodegenExpressionBuidler { private final RuleSetExpression root; private final SymbolTable symbolTable; + private final Map scopesByName; - public CodegenExpressionBuidler(RuleSetExpression root, SymbolTable symbolTable) { + public CodegenExpressionBuidler( + RuleSetExpression root, + SymbolTable symbolTable, + Map scopesByName + ) { this.root = root; this.symbolTable = symbolTable; + this.scopesByName = scopesByName; } public static CodegenExpressionBuidler from(RuleSetExpression root, RuleRuntimeTypeMirror typeMirror, SymbolTable table) { @@ -36,10 +42,17 @@ public static CodegenExpressionBuidler from(RuleSetExpression root, RuleRuntimeT } table = assignTypesVisitor.symbolTable(); root = assignIdentifier(root); - PrepareForCodegenVisitor prepareForCodegenVisitor = new PrepareForCodegenVisitor(table); - root = (RuleSetExpression) root.accept(prepareForCodegenVisitor); - table = 
prepareForCodegenVisitor.symbolTable(); - return new CodegenExpressionBuidler(root, table); + + RenameForCodegenVisitor renameForCodegenVisitor = new RenameForCodegenVisitor(table); + root = (RuleSetExpression) root.accept(renameForCodegenVisitor); + table = renameForCodegenVisitor.symbolTable(); + + ComputeScopeTree computeScopeTree = new ComputeScopeTree(table); + root.accept(computeScopeTree); + + PrepareForCodegenVisitor prepareForCodegenVisitor = new PrepareForCodegenVisitor(); + RuleSetExpression newRoot = (RuleSetExpression) root.accept(prepareForCodegenVisitor); + return new CodegenExpressionBuidler(newRoot, table, computeScopeTree.scopesByName()); } private static RuleSetExpression assignIdentifier(RuleSetExpression root) { @@ -51,27 +64,15 @@ public RuleSetExpression root() { return root; } - public boolean isParam(String name) { - return symbolTable.isParam(name); - } - - public boolean isLocal(String name) { - return symbolTable.isLocal(name); - } - public String regionParamName() { return symbolTable.regionParamName(); } - public Map locals() { - return symbolTable.locals(); - } - - public Map params() { - return symbolTable.params(); - } - public SymbolTable symbolTable() { return symbolTable; } + + public Map scopesByName() { + return scopesByName; + } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java new file mode 100644 index 000000000000..42f612f87418 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/ComputeScopeTree.java @@ -0,0 +1,193 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.rules2; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Computes all the symbols, locals and params, used by each of the rules, either directly or transitively. + */ +public final class ComputeScopeTree extends WalkRuleExpressionVisitor { + private final SymbolTable symbolTable; + private final Deque scopes = new ArrayDeque<>(); + private final Map scopesByName = new HashMap<>(); + private Scope result; + + public ComputeScopeTree(SymbolTable symbolTable) { + this.symbolTable = symbolTable; + } + + /** + * Returns the root scope. + */ + public Scope result() { + return result; + } + + /** + * Returns the mapping between rule id and scope. 
+ */ + public Map scopesByName() { + return scopesByName; + } + + @Override + public Void visitRuleSetExpression(RuleSetExpression node) { + ScopeBuilder scopeBuilder = new ScopeBuilder(); + scopeBuilder.ruleId(node.ruleId()); + scopes.push(scopeBuilder); + super.visitRuleSetExpression(node); + result = scopes.pop().build(); + scopesByName.put(result.ruleId(), result); + if (!scopes.isEmpty()) { + scopes.peekFirst().addChild(result); + } + return null; + } + + @Override + public Void visitVariableReferenceExpression(VariableReferenceExpression e) { + String variableName = e.variableName(); + ScopeBuilder current = scopes.peekFirst(); + if (symbolTable.isLocal(variableName)) { + current.usesLocal(variableName); + } else if (symbolTable.isParam(variableName)) { + current.usesParam(variableName); + } + return null; + } + + @Override + public Void visitLetExpression(LetExpression e) { + ScopeBuilder scopeBuilder = scopes.peekFirst(); + for (String binding : e.bindings().keySet()) { + scopeBuilder.defines(binding); + } + return super.visitLetExpression(e); + } + + public static class Scope { + private final String ruleId; + private final Set defines; + private final Set usesLocals; + private final Set usesParams; + private final List children; + + public Scope(ScopeBuilder builder) { + this.ruleId = Objects.requireNonNull(builder.ruleId, "ruleId cannot be null"); + this.defines = Collections.unmodifiableSet(new LinkedHashSet<>(builder.defines)); + this.usesLocals = Collections.unmodifiableSet(new LinkedHashSet<>(builder.usesLocals)); + this.usesParams = Collections.unmodifiableSet(new LinkedHashSet<>(builder.usesParams)); + this.children = Collections.unmodifiableList(new ArrayList<>(builder.children)); + } + + public String ruleId() { + return ruleId; + } + + public Set defines() { + return defines; + } + + public Set usesLocals() { + return usesLocals; + } + + public Set usesParams() { + return usesParams; + } + + @Override + public String toString() { + StringBuilder 
builder = new StringBuilder(); + appendTo(0, builder); + return builder.toString(); + } + + public void appendTo(int level, StringBuilder sb) { + String prefix = levelValue(level); + sb.append(prefix).append("=========================================\n"); + sb.append(prefix).append("rule ").append(ruleId).append("\n"); + sb.append(prefix).append("defines ").append(defines).append("\n"); + sb.append(prefix).append("uses ").append(usesLocals).append("\n"); + for (Scope child : children) { + child.appendTo(level + 1, sb); + } + } + + private String levelValue(int level) { + StringBuilder result = new StringBuilder(); + for (int i = 0; i < level; i++) { + result.append(" "); + } + return result.toString(); + } + } + + public static class ScopeBuilder { + private String ruleId; + private final Set defines = new LinkedHashSet<>(); + private final Set usesLocals = new LinkedHashSet<>(); + private final Set usesParams = new LinkedHashSet<>(); + private final List children = new ArrayList<>(); + + public ScopeBuilder ruleId(String ruleId) { + this.ruleId = ruleId; + return this; + } + + public ScopeBuilder defines(String define) { + defines.add(define); + return this; + } + + public ScopeBuilder usesLocal(String use) { + usesLocals.add(use); + return this; + } + + public ScopeBuilder usesParam(String use) { + usesParams.add(use); + return this; + } + + public ScopeBuilder addChild(Scope child) { + children.add(child); + for (String local : child.usesLocals) { + if (!child.defines.contains(local)) { + usesLocals.add(local); + } + } + for (String param : child.usesParams) { + usesParams.add(param); + } + return this; + } + + public Scope build() { + return new Scope(this); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java index adbed805cc40..55dbf5358842 100644 --- 
a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/EndpointProviderSpec2.java @@ -123,8 +123,6 @@ public TypeSpec poetSpec() { .addSuperinterface(endpointRulesSpecUtils.providerInterfaceName()) .addAnnotation(SdkInternalApi.class); - builder.addType(codegenLocalState()); - builder.addType(codegenLocalStateBuilder()); builder.addMethod(resolveEndpointMethod()); List methods = new ArrayList<>(); createRuleMethod(utils.root(), methods); @@ -154,11 +152,11 @@ private MethodSpec resolveEndpointMethod() { builder.beginControlFlow("try"); String regionParamName = utils.regionParamName(); if (regionParamName != null) { - builder.addStatement("$T result = $L(params, new $T(params.$L()))", ruleResult(), utils.root().ruleId(), - ClassName.bestGuess("LocalState"), regionParamName); + builder.addStatement("$T region = params.$L()", Region.class, regionParamName); + builder.addStatement("$T regionId = region == null ? 
null : region.id()", String.class); + builder.addStatement("$T result = $L(params, regionId)", ruleResult(), utils.root().ruleId()); } else { - builder.addStatement("$T result = $L(params, new $T())", ruleResult(), utils.root().ruleId(), - ClassName.bestGuess("LocalState")); + builder.addStatement("$T result = $L(params)", ruleResult(), utils.root().ruleId()); } builder.beginControlFlow("if (result.canContinue())") .addStatement("throw $T.create($S)", SdkClientException.class, "Rule engine did not reach an error or " @@ -206,7 +204,9 @@ private void createRuleMethod(RuleSetExpression expr, List m builder.addCode(block.build()); if (expr.isTree()) { for (RuleSetExpression child : expr.children()) { - createRuleMethod(child, methods); + if (child.isTree()) { + createRuleMethod(child, methods); + } } } } @@ -215,110 +215,28 @@ private MethodSpec.Builder methodBuilderForRule(RuleSetExpression expr) { MethodSpec.Builder builder = MethodSpec.methodBuilder(expr.ruleId()) .addModifiers(Modifier.PRIVATE, Modifier.STATIC) - .returns(ruleResult()) - .addParameter(endpointRulesSpecUtils.parametersClassName(), "params"); - builder.addParameter(ClassName.bestGuess("LocalState"), "locals"); + .returns(ruleResult()); + ComputeScopeTree.Scope scope = utils.scopesByName().get(expr.ruleId()); + builder.addParameter(endpointRulesSpecUtils.parametersClassName(), "params"); + for (String param : scope.usesLocals()) { + if (scope.defines().contains(param)) { + continue; + } + RuleType type = utils.symbolTable().localType(param); + builder.addParameter(type.javaType(), param); + } return builder; } - private void codegenExpr(RuleExpression expr, CodeBlock.Builder builder) { + private void codegenExpr(RuleSetExpression expr, CodeBlock.Builder builder) { + boolean useEndpointCaching = intermediateModel.getCustomizationConfig().getEnableEndpointProviderUriCaching(); CodeGeneratorVisitor visitor = new CodeGeneratorVisitor(typeMirror, utils.symbolTable(), knownEndpointAttributes, + 
utils.scopesByName(), + useEndpointCaching, builder); - expr.accept(visitor); - } - - private TypeSpec codegenLocalState() { - TypeSpec.Builder b = TypeSpec.classBuilder("LocalState") - .addModifiers(Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL); - Map locals = utils.locals(); - locals.forEach((k, v) -> { - b.addField(v.javaType(), k, Modifier.PRIVATE, Modifier.FINAL); - }); - MethodSpec.Builder emptyCtor = MethodSpec.constructorBuilder(); - locals.forEach((k, v) -> { - emptyCtor.addStatement("this.$1L = null", k); - }); - b.addMethod(emptyCtor.build()); - String regionParamName = utils.regionParamName(); - if (regionParamName != null) { - MethodSpec.Builder regionCtor = MethodSpec.constructorBuilder() - .addParameter(Region.class, "region"); - locals.forEach((k, v) -> { - if (k.equals(regionParamName)) { - regionCtor.beginControlFlow("if (region != null)") - .addStatement("this.$L = region.id()", regionParamName) - .nextControlFlow("else") - .addStatement("this.$L = null", regionParamName) - .endControlFlow(); - } else { - regionCtor.addStatement("this.$1L = null", k); - } - }); - b.addMethod(regionCtor.build()); - - } - ClassName localStateBuilder = ClassName.bestGuess("LocalStateBuilder"); - MethodSpec.Builder builderCtor = MethodSpec - .constructorBuilder() - .addParameter(localStateBuilder, "builder"); - - locals.forEach((k, v) -> { - builderCtor.addStatement("this.$1L = builder.$1L", k); - }); - - b.addMethod(builderCtor.build()); - locals.forEach((k, v) -> { - b.addMethod(MethodSpec.methodBuilder(k) - .addModifiers(Modifier.PUBLIC) - .returns(v.javaType()) - .addStatement("return this.$L", k) - .build()); - }); - b.addMethod(MethodSpec.methodBuilder("toBuilder") - .addModifiers(Modifier.PUBLIC) - .returns(localStateBuilder) - .addStatement("return new $T(this)", localStateBuilder) - .build()); - return b.build(); - } - - private TypeSpec codegenLocalStateBuilder() { - ClassName localStateClass = ClassName.bestGuess("LocalState"); - ClassName 
builderClass = ClassName.bestGuess("LocalStateBuilder"); - TypeSpec.Builder b = TypeSpec.classBuilder("LocalStateBuilder") - .addModifiers(Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL); - Map locals = utils.locals(); - locals.forEach((k, v) -> { - b.addField(v.javaType(), k, Modifier.PRIVATE); - }); - MethodSpec.Builder emptyCtor = MethodSpec.constructorBuilder(); - locals.forEach((k, v) -> { - emptyCtor.addStatement("this.$1L = null", k); - }); - b.addMethod(emptyCtor.build()); - MethodSpec.Builder stateCtor = MethodSpec - .constructorBuilder() - .addParameter(localStateClass, "locals"); - locals.forEach((k, v) -> { - stateCtor.addStatement("this.$1L = locals.$1L", k); - }); - b.addMethod(stateCtor.build()); - locals.forEach((k, v) -> { - b.addMethod(MethodSpec.methodBuilder(k) - .addModifiers(Modifier.PUBLIC) - .returns(builderClass) - .addParameter(v.javaType(), "value") - .addStatement("this.$L = value", k) - .addStatement("return this") - .build()); - }); - b.addMethod(MethodSpec.methodBuilder("build") - .returns(localStateClass) - .addStatement("return new $T(this)", localStateClass) - .build()); - return b.build(); + visitor.visitRuleSetExpression(expr); } private TypeName ruleResult() { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java index 23c3c4ad7c43..a453812bd5a0 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/PrepareForCodegenVisitor.java @@ -16,29 +16,13 @@ package software.amazon.awssdk.codegen.poet.rules2; import java.util.List; -import software.amazon.awssdk.codegen.internal.Utils; -import software.amazon.awssdk.utils.internal.CodegenNamingUtils; /** - * Visitor that rewrites some expressions in preparation for codegen and also renaming locals assignments to use 
idiomatic java - * names. This visitor in particular rewrites variable references to the equivalent to {@code getAttr(params, NAME)} or {@code - * getAttr(locals, NAME)}, depending on whether the reference is an endpoint params variable or a locally assigned one. + * Visitor that rewrites expressions in preparation for codegen. */ public final class PrepareForCodegenVisitor extends RewriteRuleExpressionVisitor { - private final SymbolTable symbolTable; - private final SymbolTable.Builder renames; - public PrepareForCodegenVisitor(SymbolTable symbolTable) { - this.symbolTable = symbolTable; - this.renames = SymbolTable.builder(); - } - - public SymbolTable symbolTable() { - String regionParamName = symbolTable.regionParamName(); - if (regionParamName != null) { - renames.regionParamName(javaName(regionParamName)); - } - return renames.build(); + public PrepareForCodegenVisitor() { } @Override @@ -72,34 +56,6 @@ public RuleExpression visitFunctionCallExpression(FunctionCallExpression e) { } } - @Override - public RuleExpression visitVariableReferenceExpression(VariableReferenceExpression e) { - String name = e.variableName(); - if (symbolTable.isLocal(name)) { - RuleType type = symbolTable.localType(name); - String newName = javaName(name); - renames.putLocal(newName, type); - return MemberAccessExpression - .builder() - .type(e.type()) - .source(VariableReferenceExpression.builder().variableName("locals").build()) - .name(newName) - .build(); - } - if (symbolTable.isParam(name)) { - RuleType type = symbolTable.paramType(name); - String newName = javaName(name); - renames.putParam(newName, type); - return MemberAccessExpression - .builder() - .type(e.type()) - .source(VariableReferenceExpression.builder().variableName("params").build()) - .name(newName) - .build(); - } - return e; - } - @Override public RuleExpression visitIndexedAccessExpression(IndexedAccessExpression e) { e = (IndexedAccessExpression) super.visitIndexedAccessExpression(e); @@ -112,18 +68,6 @@ 
public RuleExpression visitIndexedAccessExpression(IndexedAccessExpression e) { .build(); } - @Override - public RuleExpression visitLetExpression(LetExpression e) { - LetExpression.Builder builder = LetExpression.builder(); - e.bindings().forEach((k, v) -> { - String newName = javaName(k); - RuleExpression value = v.accept(this); - builder.putBinding(newName, value); - renames.putLocal(newName, value.type()); - }); - return builder.build(); - } - /** * Transforms the following expressions: *
    @@ -212,8 +156,4 @@ private RuleExpression simplifyNotExpression(FunctionCallExpression e) { } return e; } - - private String javaName(String name) { - return Utils.unCapitalize(CodegenNamingUtils.pascalCase(name)); - } } diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java new file mode 100644 index 000000000000..1c09b51c398a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RenameForCodegenVisitor.java @@ -0,0 +1,86 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.poet.rules2; + +import software.amazon.awssdk.codegen.internal.Utils; +import software.amazon.awssdk.utils.internal.CodegenNamingUtils; + +/** + * Visitor that renames local assignments to use idiomatic Java names. This visitor also rewrites variable references to + * the equivalent of {@code getAttr(params, NAME)}, to call the getter method in the params. + */ +public final class RenameForCodegenVisitor extends RewriteRuleExpressionVisitor { + private final SymbolTable symbolTable; + private final SymbolTable.Builder renames; + + public RenameForCodegenVisitor(SymbolTable symbolTable) { + this.symbolTable = symbolTable; + this.renames = SymbolTable.builder(); + } + + /** + * Returns the new symbol table with the renamed symbols. 
+ */ + public SymbolTable symbolTable() { + String regionParamName = symbolTable.regionParamName(); + if (regionParamName != null) { + renames.regionParamName(javaName(regionParamName)); + } + return renames.build(); + } + + @Override + public RuleExpression visitVariableReferenceExpression(VariableReferenceExpression e) { + String name = e.variableName(); + if (symbolTable.isLocal(name)) { + RuleType type = symbolTable.localType(name); + String newName = javaName(name); + renames.putLocal(newName, type); + return VariableReferenceExpression + .builder() + .variableName(newName) + .build(); + } + if (symbolTable.isParam(name)) { + RuleType type = symbolTable.paramType(name); + String newName = javaName(name); + renames.putParam(newName, type); + return MemberAccessExpression + .builder() + .type(e.type()) + .source(VariableReferenceExpression.builder().variableName("params").build()) + .name(newName) + .build(); + } + return e; + } + + @Override + public RuleExpression visitLetExpression(LetExpression e) { + LetExpression.Builder builder = LetExpression.builder(); + e.bindings().forEach((k, v) -> { + String newName = javaName(k); + RuleExpression value = v.accept(this); + builder.putBinding(newName, value); + renames.putLocal(newName, value.type()); + }); + return builder.build(); + } + + private String javaName(String name) { + return Utils.unCapitalize(CodegenNamingUtils.pascalCase(name)); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java index f7682eaec0cd..13a3ccafc51b 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/RuleSetExpression.java @@ -157,6 +157,16 @@ public boolean isTree() { return endpoint == null && error == null; } + public String category() { + if (isEndpoint()) { + return 
"endpoint"; + } + if (isError()) { + return "error"; + } + return "tree"; + } + @Override public RuleType type() { return type; diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java index fa59498b61a1..de952b6d67da 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/poet/rules2/WalkRuleExpressionVisitor.java @@ -93,7 +93,7 @@ public Void visitRuleSetExpression(RuleSetExpression e) { visitAll(e.conditions()); ErrorExpression error = e.error(); if (error != null) { - e.accept(this); + error.accept(this); } EndpointExpression endpoint = e.endpoint(); if (endpoint != null) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java index f870ceea284d..004d64fac245 100644 --- a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/AuthUtils.java @@ -68,7 +68,8 @@ public static boolean isOpBearerAuth(IntermediateModel model, OperationModel opM } private static boolean isServiceBearerAuth(IntermediateModel model) { - return model.getMetadata().getAuthType() == AuthType.BEARER; + return model.getMetadata().getAuthType() == AuthType.BEARER || + (model.getMetadata().getAuth() != null && model.getMetadata().getAuth().contains(AuthType.BEARER)); } private static boolean isServiceSigv4a(IntermediateModel model) { diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java new file mode 100644 index 000000000000..0c3dfbccf2e8 --- /dev/null +++ 
b/codegen/src/main/java/software/amazon/awssdk/codegen/utils/ProtocolUtils.java @@ -0,0 +1,63 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.utils; + +import java.util.Arrays; +import java.util.List; +import software.amazon.awssdk.codegen.model.service.ServiceMetadata; + +/** + * Resolves the protocol from the service model {@code protocol} and {@code protocols} fields. + */ +public final class ProtocolUtils { + + /** + * Priority-ordered list of protocols supported by the SDK. + */ + private static final List SUPPORTED_PROTOCOLS = Arrays.asList( + "smithy-rpc-v2-cbor", "json", "rest-json", "rest-xml", "query", "ec2"); + + private ProtocolUtils() { + } + + /** + * {@code protocols} supersedes {@code protocol}. The highest priority protocol supported by the SDK that is present in the + * service model {@code protocols} list will be selected. If none of the values in {@code protocols} is supported by the + * SDK, an error will be thrown. If {@code protocols} is empty or null, the value from {@code protocol} will be returned. 
+ */ + public static String resolveProtocol(ServiceMetadata serviceMetadata) { + + List protocols = serviceMetadata.getProtocols(); + String protocol = serviceMetadata.getProtocol(); + + if (protocols == null || protocols.isEmpty()) { + return protocol; + } + + // Kinesis uses customization.config customServiceMetadata to set cbor + if ("cbor".equals(protocols.get(0))) { + return "cbor"; + } + + for (String supportedProtocol : SUPPORTED_PROTOCOLS) { + if (protocols.contains(supportedProtocol)) { + return supportedProtocol; + } + } + + throw new IllegalArgumentException("The SDK does not support any of provided protocols: " + protocols); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java new file mode 100644 index 000000000000..28f482328253 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelInvalidException.java @@ -0,0 +1,58 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.validation; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Exception thrown during code generation to signal that the model is invalid. 
+ */ +public class ModelInvalidException extends RuntimeException { + private final List validationEntries; + + private ModelInvalidException(Builder b) { + super("Validation failed with the following errors: " + b.validationEntries); + this.validationEntries = Collections.unmodifiableList(new ArrayList<>(b.validationEntries)); + } + + public List validationEntries() { + return validationEntries; + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private List validationEntries; + + public Builder validationEntries(List validationEntries) { + if (validationEntries == null) { + this.validationEntries = Collections.emptyList(); + } else { + this.validationEntries = validationEntries; + } + + return this; + } + + public ModelInvalidException build() { + return new ModelInvalidException(this); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java new file mode 100644 index 000000000000..55c2bedcba19 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationContext.java @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +import java.util.Optional; +import software.amazon.awssdk.codegen.model.config.customization.ShareModelConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; + +/** + * Context object for {@link ModelValidator}s. This object contains all the information available to the validations in order + * for them to perform their tasks. + */ +public final class ModelValidationContext { + private final IntermediateModel intermediateModel; + private final IntermediateModel shareModelsTarget; + + private ModelValidationContext(Builder builder) { + this.intermediateModel = builder.intermediateModel; + this.shareModelsTarget = builder.shareModelsTarget; + } + + /** + * The service model for which code is being generated. + */ + public IntermediateModel intermediateModel() { + return intermediateModel; + } + + /** + * The model of the service that the currently generating service shares models with. In other words, this is the service + * model for the service defined in {@link ShareModelConfig#getShareModelWith()}. + */ + public Optional shareModelsTarget() { + return Optional.ofNullable(shareModelsTarget); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private IntermediateModel intermediateModel; + private IntermediateModel shareModelsTarget; + + /** + * The service model for which code is being generated. + */ + public Builder intermediateModel(IntermediateModel intermediateModel) { + this.intermediateModel = intermediateModel; + return this; + } + + /** + * The model of the service that the currently generating service shares models with. In other words, this is the service + * model for the service defined in {@link ShareModelConfig#getShareModelWith()}. 
+ */ + public Builder shareModelsTarget(IntermediateModel shareModelsTarget) { + this.shareModelsTarget = shareModelsTarget; + return this; + } + + public ModelValidationContext build() { + return new ModelValidationContext(this); + } + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java new file mode 100644 index 000000000000..1112dc2190d5 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidationReport.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +import java.util.Collections; +import java.util.List; + +public class ModelValidationReport { + private List validationEntries = Collections.emptyList(); + + public List getValidationEntries() { + return validationEntries; + } + + public void setValidationEntries(List validationEntries) { + if (validationEntries != null) { + this.validationEntries = validationEntries; + } else { + this.validationEntries = Collections.emptyList(); + } + } + + public ModelValidationReport withValidationEntries(List validationEntries) { + setValidationEntries(validationEntries); + return this; + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java new file mode 100644 index 000000000000..b544a030eaf5 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ModelValidator.java @@ -0,0 +1,22 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +import java.util.List; + +public interface ModelValidator { + List validateModels(ModelValidationContext context); +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java new file mode 100644 index 000000000000..6b7f8471da7c --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/SharedModelsValidator.java @@ -0,0 +1,210 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.validation; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.ListModel; +import software.amazon.awssdk.codegen.model.intermediate.MapModel; +import software.amazon.awssdk.codegen.model.intermediate.MemberModel; +import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.utils.Logger; + +/** + * Validator that ensures any shapes shared between two services are completely identical. This validator returns a validation + * entry for each shape that is present in both service models but has differing definitions in each model. 
+ */ +public final class SharedModelsValidator implements ModelValidator { + private static final Logger LOG = Logger.loggerFor(SharedModelsValidator.class); + + @Override + public List validateModels(ModelValidationContext context) { + if (!context.shareModelsTarget().isPresent()) { + return Collections.emptyList(); + } + + return validateSharedShapes(context.intermediateModel(), context.shareModelsTarget().get()); + } + + private List validateSharedShapes(IntermediateModel m1, IntermediateModel m2) { + List errors = new ArrayList<>(); + + Map m1Shapes = m1.getShapes(); + Map m2Shapes = m2.getShapes(); + + m1Shapes.forEach((name, m1Shape) -> { + if (!m2Shapes.containsKey(name)) { + return; + } + + ShapeModel m2Shape = m2Shapes.get(name); + + if (!shapesAreIdentical(m1Shape, m2Shape)) { + String detailMsg = String.format("Services '%s' and '%s' have differing definitions of the shared model '%s'", + m1.getMetadata().getServiceName(), + m2.getMetadata().getServiceName(), + name); + LOG.warn(() -> detailMsg); + + errors.add(new ValidationEntry().withErrorId(ValidationErrorId.SHARED_MODELS_DIFFER) + .withSeverity(ValidationErrorSeverity.DANGER) + .withDetailMessage(detailMsg)); + } + }); + + return errors; + } + + private boolean shapesAreIdentical(ShapeModel m1, ShapeModel m2) { + // Note: We can't simply do m1.equals(m2) because shared models can still differ slightly in the + // marshalling/unmarshalling info such as the exact request operation name on the wire. + // In particular, we leave out comparing the `unmarshaller` and `marshaller` members of ShapeModel. + // Additionally, the List are not compared with equals() because we handle MemberModel equality specially + // as well. 
+ return m1.isDeprecated() == m2.isDeprecated() + && m1.isHasPayloadMember() == m2.isHasPayloadMember() + && m1.isHasHeaderMember() == m2.isHasHeaderMember() + && m1.isHasStatusCodeMember() == m2.isHasStatusCodeMember() + && m1.isHasStreamingMember() == m2.isHasStreamingMember() + && m1.isHasRequiresLengthMember() == m2.isHasRequiresLengthMember() + && m1.isWrapper() == m2.isWrapper() + && m1.isSimpleMethod() == m2.isSimpleMethod() + && m1.isFault() == m2.isFault() + && m1.isEventStream() == m2.isEventStream() + && m1.isEvent() == m2.isEvent() + && m1.isDocument() == m2.isDocument() + && m1.isUnion() == m2.isUnion() + && m1.isRetryable() == m2.isRetryable() + && m1.isThrottling() == m2.isThrottling() + && Objects.equals(m1.getC2jName(), m2.getC2jName()) + && Objects.equals(m1.getShapeName(), m2.getShapeName()) + && Objects.equals(m1.getDeprecatedMessage(), m2.getDeprecatedMessage()) + && Objects.equals(m1.getType(), m2.getType()) + && Objects.equals(m1.getRequired(), m2.getRequired()) + && Objects.equals(m1.getRequestSignerClassFqcn(), m2.getRequestSignerClassFqcn()) + && Objects.equals(m1.getEndpointDiscovery(), m2.getEndpointDiscovery()) + && memberListsAreIdentical(m1.getMembers(), m2.getMembers()) + && Objects.equals(m1.getEnums(), m2.getEnums()) + && Objects.equals(m1.getVariable(), m2.getVariable()) + && Objects.equals(m1.getErrorCode(), m2.getErrorCode()) + && Objects.equals(m1.getHttpStatusCode(), m2.getHttpStatusCode()) + && Objects.equals(m1.getCustomization(), m2.getCustomization()) + && Objects.equals(m1.getXmlNamespace(), m2.getXmlNamespace()) + ; + } + + private boolean memberListsAreIdentical(List memberList1, List memberList2) { + if (memberList1.size() != memberList2.size()) { + return false; + } + + for (int i = 0; i < memberList1.size(); i++) { + MemberModel m1 = memberList1.get(i); + MemberModel m2 = memberList2.get(i); + if (!memberModelsAreIdentical(m1, m2)) { + return false; + } + } + + return true; + } + + private boolean 
memberModelsAreIdentical(MemberModel m1, MemberModel m2) { + // Similar to ShapeModel, can't call equals() directly. It has a ShapeModel property that is ignored, and ListModel and + // MapModel are treated similarly + return m1.isDeprecated() == m2.isDeprecated() + && m1.isRequired() == m2.isRequired() + && m1.isSynthetic() == m2.isSynthetic() + && m1.isIdempotencyToken() == m2.isIdempotencyToken() + && m1.isJsonValue() == m2.isJsonValue() + && m1.isEventPayload() == m2.isEventPayload() + && m1.isEventHeader() == m2.isEventHeader() + && m1.isEndpointDiscoveryId() == m2.isEndpointDiscoveryId() + && m1.isSensitive() == m2.isSensitive() + && m1.isXmlAttribute() == m2.isXmlAttribute() + && m1.ignoreDataTypeConversionFailures() == m2.ignoreDataTypeConversionFailures() + && Objects.equals(m1.getName(), m2.getName()) + && Objects.equals(m1.getC2jName(), m2.getC2jName()) + && Objects.equals(m1.getC2jShape(), m2.getC2jShape()) + && Objects.equals(m1.getVariable(), m2.getVariable()) + && Objects.equals(m1.getSetterModel(), m2.getSetterModel()) + && Objects.equals(m1.getGetterModel(), m2.getGetterModel()) + && Objects.equals(m1.getHttp(), m2.getHttp()) + && Objects.equals(m1.getDeprecatedMessage(), m2.getDeprecatedMessage()) + // Note: not equals() + && listModelsAreIdentical(m1.getListModel(), m2.getListModel()) + // Note: not equals() + && mapModelsAreIdentical(m1.getMapModel(), m2.getMapModel()) + && Objects.equals(m1.getEnumType(), m2.getEnumType()) + && Objects.equals(m1.getXmlNameSpaceUri(), m2.getXmlNameSpaceUri()) + && Objects.equals(m1.getFluentGetterMethodName(), m2.getFluentGetterMethodName()) + && Objects.equals(m1.getFluentEnumGetterMethodName(), m2.getFluentEnumGetterMethodName()) + && Objects.equals(m1.getFluentSetterMethodName(), m2.getFluentSetterMethodName()) + && Objects.equals(m1.getFluentEnumSetterMethodName(), m2.getFluentEnumSetterMethodName()) + && Objects.equals(m1.getExistenceCheckMethodName(), m2.getExistenceCheckMethodName()) + && 
Objects.equals(m1.getBeanStyleGetterMethodName(), m2.getBeanStyleGetterMethodName()) + && Objects.equals(m1.getBeanStyleSetterMethodName(), m2.getBeanStyleSetterMethodName()) + && Objects.equals(m1.getUnionEnumTypeName(), m2.getUnionEnumTypeName()) + && Objects.equals(m1.getTimestampFormat(), m2.getTimestampFormat()) + && Objects.equals(m1.getDeprecatedName(), m2.getDeprecatedName()) + && Objects.equals(m1.getDeprecatedFluentGetterMethodName(), m2.getDeprecatedFluentGetterMethodName()) + && Objects.equals(m1.getDeprecatedFluentSetterMethodName(), m2.getDeprecatedFluentSetterMethodName()) + && Objects.equals(m1.getDeprecatedBeanStyleSetterMethodName(), m2.getDeprecatedBeanStyleSetterMethodName()) + && Objects.equals(m1.getContextParam(), m2.getContextParam()); + } + + private boolean listModelsAreIdentical(ListModel m1, ListModel m2) { + if (m1 == null ^ m2 == null) { + return false; + } + + if (m1 == null) { + return true; + } + + return Objects.equals(m1.getImplType(), m2.getImplType()) + && Objects.equals(m1.getMemberType(), m2.getMemberType()) + && Objects.equals(m1.getInterfaceType(), m2.getInterfaceType()) + // Note: not equals() + && memberModelsAreIdentical(m1.getListMemberModel(), m2.getListMemberModel()) + && Objects.equals(m1.getMemberLocationName(), m2.getMemberLocationName()) + && Objects.equals(m1.getMemberAdditionalMarshallingPath(), m2.getMemberAdditionalMarshallingPath()) + && Objects.equals(m1.getMemberAdditionalUnmarshallingPath(), m2.getMemberAdditionalUnmarshallingPath()); + } + + private boolean mapModelsAreIdentical(MapModel m1, MapModel m2) { + if (m1 == null ^ m2 == null) { + return false; + } + + if (m1 == null) { + return true; + } + + return Objects.equals(m1.getImplType(), m2.getImplType()) + && Objects.equals(m1.getInterfaceType(), m2.getInterfaceType()) + && Objects.equals(m1.getKeyLocationName(), m2.getKeyLocationName()) + // Note: not equals() + && memberModelsAreIdentical(m1.getKeyModel(), m2.getKeyModel()) + && 
Objects.equals(m1.getValueLocationName(), m2.getValueLocationName()) + // Note: not equals() + && memberModelsAreIdentical(m1.getValueModel(), m2.getValueModel()); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java new file mode 100644 index 000000000000..4e84bd625185 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationEntry.java @@ -0,0 +1,72 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +import software.amazon.awssdk.utils.ToString; + +public final class ValidationEntry { + private ValidationErrorId errorId; + private ValidationErrorSeverity severity; + private String detailMessage; + + public ValidationErrorId getErrorId() { + return errorId; + } + + public void setErrorId(ValidationErrorId errorId) { + this.errorId = errorId; + } + + public ValidationEntry withErrorId(ValidationErrorId errorId) { + setErrorId(errorId); + return this; + } + + public ValidationErrorSeverity getSeverity() { + return severity; + } + + public void setSeverity(ValidationErrorSeverity severity) { + this.severity = severity; + } + + public ValidationEntry withSeverity(ValidationErrorSeverity severity) { + setSeverity(severity); + return this; + } + + public String getDetailMessage() { + return detailMessage; + } + + public void setDetailMessage(String detailMessage) { + this.detailMessage = detailMessage; + } + + public ValidationEntry withDetailMessage(String detailMessage) { + setDetailMessage(detailMessage); + return this; + } + + @Override + public String toString() { + return ToString.builder("ValidationEntry") + .add("errorId", errorId) + .add("severity", severity) + .add("detailMessage", detailMessage) + .build(); + } +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java new file mode 100644 index 000000000000..37c488e11fc5 --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorId.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.validation; + +public enum ValidationErrorId { + SHARED_MODELS_DIFFER( + "The shared models between two services differ in their definition, which causes differences in the source" + + " files generated by the code generator." + ), + UNKNOWN_SHAPE_MEMBER("The model references an unknown shape member."), + REQUEST_URI_NOT_FOUND("The request URI does not exist."), + ; + + private final String description; + + ValidationErrorId(String description) { + this.description = description; + } + + +} diff --git a/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java new file mode 100644 index 000000000000..39b6b015e42a --- /dev/null +++ b/codegen/src/main/java/software/amazon/awssdk/codegen/validation/ValidationErrorSeverity.java @@ -0,0 +1,22 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.validation; + +public enum ValidationErrorSeverity { + // Denotes an error that MUST be addressed. + DANGER, + ; +} diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/Partition.java.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/Partition.java.resource index bdded3575891..2cf9d20dc2ac 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/Partition.java.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/Partition.java.resource @@ -1,5 +1,6 @@ import java.util.HashMap; import java.util.Map; +import java.util.regex.Pattern; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.protocols.jsoncore.JsonNode; import software.amazon.awssdk.utils.ToString; @@ -15,6 +16,7 @@ public class Partition { private final String regionRegex; private final Map regions; private final Outputs outputs; + private Pattern regionPattern; private Partition(Builder builder) { this.id = builder.id; @@ -31,6 +33,13 @@ public class Partition { return regionRegex; } + public boolean regionMatches(String region) { + if (regionPattern == null) { + regionPattern = Pattern.compile(regionRegex); + } + return regionPattern.matcher(region).matches(); + } + public Map regions() { return regions; } diff --git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource index a2bfa6ead490..456b07fca676 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules/partitions.json.resource @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, diff 
--git a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules2/RulesFunctions.java.resource b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules2/RulesFunctions.java.resource index a6c070217a11..5cfb8b857a5a 100644 --- a/codegen/src/main/resources/software/amazon/awssdk/codegen/rules2/RulesFunctions.java.resource +++ b/codegen/src/main/resources/software/amazon/awssdk/codegen/rules2/RulesFunctions.java.resource @@ -90,8 +90,7 @@ public class RulesFunctions { if (matchedPartition == null) { // try matching on region name pattern for (Partition p : data.partitions) { - Pattern regex = Pattern.compile(p.regionRegex()); - if (regex.matcher(regionName).matches()) { + if (p.regionMatches(regionName)) { matchedPartition = p; break; } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java new file mode 100644 index 000000000000..c776e0295bea --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/CodeGeneratorTest.java @@ -0,0 +1,267 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.internal.Jackson; +import software.amazon.awssdk.codegen.model.config.customization.CustomizationConfig; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.rules.endpoints.EndpointTestSuiteModel; +import software.amazon.awssdk.codegen.model.service.ServiceModel; +import software.amazon.awssdk.codegen.poet.ClientTestModels; +import software.amazon.awssdk.codegen.validation.ModelInvalidException; +import software.amazon.awssdk.codegen.validation.ModelValidator; +import software.amazon.awssdk.codegen.validation.ValidationErrorId; + +public class CodeGeneratorTest { + private static final String VALIDATION_REPORT_NAME = "validation-report.json"; + + private Path outputDir; + + @BeforeEach + void methodSetup() throws IOException { + outputDir = Files.createTempDirectory(null); + } + + @AfterEach + void methodTeardown() throws IOException { + deleteDirectory(outputDir); + } + + @Test + void 
build_cj2ModelsAndIntermediateModelSet_throws() { + assertThatThrownBy(() -> CodeGenerator.builder() + .models(C2jModels.builder().build()) + .intermediateModel(new IntermediateModel()) + .build()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("Only one of"); + } + + @Test + void execute_emitValidationReportIsFalse_doesNotEmitValidationReport() throws IOException { + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir); + assertThat(Files.exists(validationReportPath(outputDir))).isFalse(); + } + + @Test + void execute_emitValidationReportIsTrue_emitsValidationReport() throws IOException { + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir, true, null); + assertThat(Files.exists(validationReportPath(outputDir))).isTrue(); + } + + @Test + void execute_invokesModelValidators() { + ModelValidator mockValidator = mock(ModelValidator.class); + when(mockValidator.validateModels(any())).thenReturn(Collections.emptyList()); + + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), outputDir, true, + Collections.singletonList(mockValidator)); + + verify(mockValidator).validateModels(any()); + } + + @Test + void execute_c2jModelsAndIntermediateModel_generateSameCode() throws IOException { + Path c2jModelsOutputDir = outputDir.resolve("c2jModels"); + generateCodeFromC2jModels(ClientTestModels.awsJsonServiceC2jModels(), c2jModelsOutputDir, false, Collections.emptyList()); + + Path intermediateModelOutputDir = outputDir.resolve("intermediate-model"); + generateCodeFromIntermediateModel(ClientTestModels.awsJsonServiceModels(), intermediateModelOutputDir); + + List c2jModels_generatedFiles = Files.walk(c2jModelsOutputDir) + .sorted() + .map(c2jModelsOutputDir::relativize) + .collect(Collectors.toList()); + + List intermediateModels_generatedFiles = Files.walk(intermediateModelOutputDir) + .sorted() + .map(intermediateModelOutputDir::relativize) + .collect(Collectors.toList()); + + 
assertThat(c2jModels_generatedFiles).isNotEmpty(); + + // Ensure same exact set of files + assertThat(c2jModels_generatedFiles).isEqualTo(intermediateModels_generatedFiles); + + // All files should be exactly the same + for (Path generatedFile : c2jModels_generatedFiles) { + Path c2jGenerated = c2jModelsOutputDir.resolve(generatedFile); + Path intermediateGenerated = intermediateModelOutputDir.resolve(generatedFile); + + if (Files.isDirectory(c2jGenerated)) { + assertThat(Files.isDirectory(intermediateGenerated)).isTrue(); + } else { + assertThat(readToString(c2jGenerated)).isEqualTo(readToString(intermediateGenerated)); + } + } + } + + @Test + void execute_endpointsTestReferencesUnknownOperationMember_throwsValidationError() throws IOException { + ModelValidator mockValidator = mock(ModelValidator.class); + when(mockValidator.validateModels(any())).thenReturn(Collections.emptyList()); + + C2jModels referenceModels = ClientTestModels.awsJsonServiceC2jModels(); + + C2jModels c2jModelsWithBadTest = + C2jModels.builder() + .endpointTestSuiteModel(getBrokenEndpointTestSuiteModel()) + .customizationConfig(referenceModels.customizationConfig()) + .serviceModel(referenceModels.serviceModel()) + .paginatorsModel(referenceModels.paginatorsModel()) + .build(); + + assertThatThrownBy(() -> generateCodeFromC2jModels(c2jModelsWithBadTest, outputDir, true, + Collections.singletonList(mockValidator))) + .hasCauseInstanceOf(ModelInvalidException.class) + .matches(e -> { + ModelInvalidException exception = (ModelInvalidException) e.getCause(); + return exception.validationEntries().get(0).getErrorId() == ValidationErrorId.UNKNOWN_SHAPE_MEMBER; + }); + } + + @Test + void execute_operationHasNoRequestUri_throwsValidationError() throws IOException { + C2jModels models = C2jModels.builder() + .customizationConfig(CustomizationConfig.create()) + .serviceModel(getMissingRequestUriServiceModel()) + .build(); + + assertThatThrownBy(() -> generateCodeFromC2jModels(models, outputDir, true, 
Collections.emptyList())) + .isInstanceOf(ModelInvalidException.class) + .matches(e -> ((ModelInvalidException) e).validationEntries().get(0).getErrorId() + == ValidationErrorId.REQUEST_URI_NOT_FOUND); + } + + private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir) { + generateCodeFromC2jModels(c2jModels, outputDir, false, null); + } + + private void generateCodeFromC2jModels(C2jModels c2jModels, Path outputDir, + boolean emitValidationReport, + List modelValidators) { + Path sources = outputDir.resolve("generated-sources").resolve("sdk"); + Path resources = outputDir.resolve("generated-resources").resolve("sdk-resources"); + Path tests = outputDir.resolve("generated-test-sources").resolve("sdk-tests"); + + CodeGenerator.builder() + .models(c2jModels) + .sourcesDirectory(sources.toAbsolutePath().toString()) + .resourcesDirectory(resources.toAbsolutePath().toString()) + .testsDirectory(tests.toAbsolutePath().toString()) + .emitValidationReport(emitValidationReport) + .modelValidators(modelValidators) + .build() + .execute(); + } + + private void generateCodeFromIntermediateModel(IntermediateModel intermediateModel, Path outputDir) { + Path sources = outputDir.resolve("generated-sources").resolve("sdk"); + Path resources = outputDir.resolve("generated-resources").resolve("sdk-resources"); + Path tests = outputDir.resolve("generated-test-sources").resolve("sdk-tests"); + + CodeGenerator.builder() + .intermediateModel(intermediateModel) + .sourcesDirectory(sources.toAbsolutePath().toString()) + .resourcesDirectory(resources.toAbsolutePath().toString()) + .testsDirectory(tests.toAbsolutePath().toString()) + .build() + .execute(); + } + + private static String readToString(Path p) throws IOException { + ByteBuffer bb = ByteBuffer.wrap(Files.readAllBytes(p)); + return StandardCharsets.UTF_8.decode(bb).toString(); + } + + private static Path validationReportPath(Path root) { + return root.resolve(Paths.get("generated-sources", "sdk", "models", 
VALIDATION_REPORT_NAME)); + } + + private EndpointTestSuiteModel getBrokenEndpointTestSuiteModel() throws IOException { + String json = resourceAsString("incorrect-endpoint-tests.json"); + return Jackson.load(EndpointTestSuiteModel.class, json); + } + + private ServiceModel getMissingRequestUriServiceModel() throws IOException { + String json = resourceAsString("no-request-uri-operation-service.json"); + return Jackson.load(ServiceModel.class, json); + } + + private String resourceAsString(String name) throws IOException { + ByteArrayOutputStream baos; + try (InputStream resourceAsStream = getClass().getResourceAsStream(name)) { + baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int read; + while ((read = resourceAsStream.read(buffer)) != -1) { + baos.write(buffer, 0, read); + } + } + return StandardCharsets.UTF_8.decode(ByteBuffer.wrap(baos.toByteArray())).toString(); + } + + private static void deleteDirectory(Path dir) throws IOException { + Files.walkFileTree(dir, new FileVisitor() { + + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException { + return FileVisitResult.TERMINATE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java new file mode 100644 index 000000000000..107a6e6cdb66 --- /dev/null +++ 
b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ArgumentModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ArgumentModelTest { + @Test + public void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ArgumentModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java new file mode 100644 index 000000000000..28d8dd845412 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/AuthorizerModelTest.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class AuthorizerModelTest { + @Test + public void equals_isCorrect() { + EqualsVerifier.simple().forClass(AuthorizerModel.class).usingGetClass().verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java new file mode 100644 index 000000000000..bd4a0859603f --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/MemberModelTest.java @@ -0,0 +1,40 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class MemberModelTest { + @Test + public void equals_isCorrect() { + ListModel redListModel = new ListModel(); + redListModel.setMemberLocationName("RedLocation"); + ListModel blueListModel = new ListModel(); + blueListModel.setMemberLocationName("BlueLocation"); + + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setC2jName("RedC2jName"); + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setC2jName("BlueC2jName"); + + EqualsVerifier.simple().forClass(MemberModel.class) + .withPrefabValues(ListModel.class, redListModel, blueListModel) + .withPrefabValues(MemberModel.class, redMemberModel, blueMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java new file mode 100644 index 000000000000..531d0b1aa55e --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/OperationModelTest.java @@ -0,0 +1,35 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class OperationModelTest { + @Test + void equals_isCorrect() { + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setName("blue"); + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setName("red"); + + EqualsVerifier.simple() + .forClass(OperationModel.class) + .withPrefabValues(MemberModel.class, blueMemberModel, redMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java new file mode 100644 index 000000000000..cd142cb34c2c --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ParameterHttpMappingTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ParameterHttpMappingTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ParameterHttpMapping.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java new file mode 100644 index 000000000000..53e99f514403 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ReturnTypeModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ReturnTypeModelTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ReturnTypeModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java new file mode 100644 index 000000000000..08fb79681e96 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/ShapeModelTest.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ShapeModelTest { + + + @Test + public void equals_isCorrect() { + MemberModel blueMemberModel = new MemberModel(); + blueMemberModel.setName("blue"); + MemberModel redMemberModel = new MemberModel(); + redMemberModel.setName("red"); + + EqualsVerifier.simple() + .forClass(ShapeModel.class) + .withPrefabValues(MemberModel.class, blueMemberModel, redMemberModel) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java new file mode 100644 index 000000000000..55ea2f39123a --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/VariableModelTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class VariableModelTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(VariableModel.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java new file mode 100644 index 000000000000..aa29412a5f5f --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ArtificialResultWrapperTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate.customization; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ArtificialResultWrapperTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ArtificialResultWrapper.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java new file mode 100644 index 000000000000..3126117f100d --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/intermediate/customization/ShapeCustomizationInfoTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.intermediate.customization; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ShapeCustomizationInfoTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ShapeCustomizationInfo.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java new file mode 100644 index 000000000000..937688b70cb4 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/model/service/ContextParamTest.java @@ -0,0 +1,29 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.model.service; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class ContextParamTest { + @Test + void equals_isCorrect() { + EqualsVerifier.simple() + .forClass(ContextParam.class) + .usingGetClass() + .verify(); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java index 6b8756474405..cec5a7fd4bb2 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/naming/DefaultNamingStrategyTest.java @@ -337,6 +337,39 @@ public void validateAllowsUnderscoresWithCustomization() { strategy.validateCustomerVisibleNaming(model); } + @Test + public void getSigningNameForEnvironmentVariables_convertsDashAndUppercases() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn("signing-name"); + + assertThat(strat.getSigningNameForEnvironmentVariables()).isEqualTo("SIGNING_NAME"); + } + + @Test + public void getSigningNameForSystemProperties_convertsDashAndUppercasesWords() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn("signing-name"); + + assertThat(strat.getSigningNameForSystemProperties()).isEqualTo("SigningName"); + } + + @Test + public void getSigningName_Uses_EndpointPrefix_whenSigningNameUnset() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + when(serviceMetadata.getSigningName()).thenReturn(null); + when(serviceMetadata.getEndpointPrefix()).thenReturn("EndpointPrefixFoo"); + + assertThat(strat.getSigningName()).isEqualTo("EndpointPrefixFoo"); + } + + @Test + public void getSigningName_Uses_SigningName() { + when(serviceModel.getMetadata()).thenReturn(serviceMetadata); + 
when(serviceMetadata.getSigningName()).thenReturn("Foo"); + + assertThat(strat.getSigningName()).isEqualTo("Foo"); + } + @Test public void validateServiceIdentifiersForEnvVarsAndProfileProperty() { when(serviceModel.getMetadata()).thenReturn(serviceMetadata); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java index 308aa69ea487..920ee018c402 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/ClientTestModels.java @@ -47,6 +47,17 @@ public static IntermediateModel awsJsonServiceModels() { return new IntermediateModelBuilder(models).build(); } + public static C2jModels awsJsonServiceC2jModels() { + File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/json/service-2.json").getFile()); + File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/json/customization.config").getFile()); + File paginatorsModel = new File(ClientTestModels.class.getResource("client/c2j/json/paginators.json").getFile()); + return C2jModels.builder() + .serviceModel(getServiceModel(serviceModel)) + .customizationConfig(getCustomizationConfig(customizationModel)) + .paginatorsModel(getPaginatorsModel(paginatorsModel)) + .build(); + } + public static IntermediateModel cborServiceModels() { File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/json/service-2.json").getFile()); File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/cbor/customization.config").getFile()); @@ -86,6 +97,22 @@ public static IntermediateModel bearerAuthServiceModels() { return new IntermediateModelBuilder(models).build(); } + public static IntermediateModel envBearerTokenServiceModels() { + File serviceModel = new File(ClientTestModels.class.getResource( + 
"client/c2j/json-bearer-auth/service-2.json").getFile()); + File customizationModel = new File(ClientTestModels.class.getResource( + "client/c2j/json-bearer-auth/customization-env-bearer-token.config").getFile()); + File paginatorsModel = new File(ClientTestModels.class.getResource( + "client/c2j/json-bearer-auth/paginators.json").getFile()); + C2jModels models = C2jModels.builder() + .serviceModel(getServiceModel(serviceModel)) + .customizationConfig(getCustomizationConfig(customizationModel)) + .paginatorsModel(getPaginatorsModel(paginatorsModel)) + .build(); + + return new IntermediateModelBuilder(models).build(); + } + public static IntermediateModel restJsonServiceModels() { File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/rest-json/service-2.json").getFile()); File customizationModel = new File(ClientTestModels.class.getResource("client/c2j/rest-json/customization.config").getFile()); @@ -159,6 +186,48 @@ public static IntermediateModel queryServiceModelsWithOverrideKnowProperties() { return new IntermediateModelBuilder(models).build(); } + public static IntermediateModel queryServiceModelsWithUnknownEndpointProperties() { + File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/query/service-2.json").getFile()); + File waitersModel = new File(ClientTestModels.class.getResource("client/c2j/query/waiters-2.json").getFile()); + File endpointRuleSetModel = + new File(ClientTestModels.class.getResource("client/c2j/query/endpoint-rule-set-unknown-properties.json").getFile()); + File endpointTestsModel = + new File(ClientTestModels.class.getResource("client/c2j/query/endpoint-tests.json").getFile()); + + C2jModels models = C2jModels + .builder() + .serviceModel(getServiceModel(serviceModel)) + .waitersModel(getWaiters(waitersModel)) + .customizationConfig(CustomizationConfig.create()) + .endpointRuleSetModel(getEndpointRuleSet(endpointRuleSetModel)) + .endpointTestSuiteModel(getEndpointTestSuite(endpointTestsModel)) + 
.build(); + + return new IntermediateModelBuilder(models).build(); + } + + public static IntermediateModel queryServiceModelsWithUriCache() { + File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/query/service-2.json").getFile()); + File customizationModel = + new File(ClientTestModels.class.getResource("client/c2j/query/customization-uri-cache.config").getFile()); + File waitersModel = new File(ClientTestModels.class.getResource("client/c2j/query/waiters-2.json").getFile()); + File endpointRuleSetModel = + new File(ClientTestModels.class.getResource("client/c2j/query/endpoint-rule-set.json").getFile()); + File endpointTestsModel = + new File(ClientTestModels.class.getResource("client/c2j/query/endpoint-tests.json").getFile()); + + C2jModels models = C2jModels + .builder() + .serviceModel(getServiceModel(serviceModel)) + .customizationConfig(getCustomizationConfig(customizationModel)) + .waitersModel(getWaiters(waitersModel)) + .endpointRuleSetModel(getEndpointRuleSet(endpointRuleSetModel)) + .endpointTestSuiteModel(getEndpointTestSuite(endpointTestsModel)) + .build(); + + return new IntermediateModelBuilder(models).build(); + } + public static IntermediateModel queryServiceModelsEndpointAuthParamsWithAllowList() { File serviceModel = new File(ClientTestModels.class.getResource("client/c2j/query/service-2.json").getFile()); File customizationModel = diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java index dba6bca98c74..3e2807600928 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/auth/scheme/AuthSchemeSpecTest.java @@ -66,6 +66,12 @@ static List parameters() { .caseName("query") .outputFileSuffix("default-params") .build(), + TestCase.builder() + 
.modelProvider(ClientTestModels::queryServiceModels) + .classSpecProvider(PreferredAuthSchemeProviderSpec::new) + .caseName("query") + .outputFileSuffix("preferred-provider") + .build(), // query-endpoint-auth-params TestCase.builder() .modelProvider(ClientTestModels::queryServiceModelsEndpointAuthParamsWithAllowList) @@ -214,6 +220,13 @@ static List parameters() { .classSpecProvider(AuthSchemeInterceptorSpec::new) .caseName("ops-auth-sigv4a-value") .outputFileSuffix("interceptor") + .build(), + // service with environment bearer token enabled + TestCase.builder() + .modelProvider(ClientTestModels::envBearerTokenServiceModels) + .classSpecProvider(AuthSchemeInterceptorSpec::new) + .caseName("env-bearer-token") + .outputFileSuffix("interceptor") .build() ); } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java index a09271f4001a..423ae5aba59a 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/builder/BaseClientBuilderClassTest.java @@ -17,6 +17,7 @@ import static software.amazon.awssdk.codegen.poet.ClientTestModels.bearerAuthServiceModels; import static software.amazon.awssdk.codegen.poet.ClientTestModels.composedClientJsonServiceModels; +import static software.amazon.awssdk.codegen.poet.ClientTestModels.envBearerTokenServiceModels; import static software.amazon.awssdk.codegen.poet.ClientTestModels.internalConfigModels; import static software.amazon.awssdk.codegen.poet.ClientTestModels.operationWithNoAuth; import static software.amazon.awssdk.codegen.poet.ClientTestModels.opsWithSigv4a; @@ -87,6 +88,13 @@ void baseClientBuilderClassWithBearerAuth_sra() { validateBaseClientBuilderClassGeneration(bearerAuthServiceModels(), "test-bearer-auth-client-builder-class.java", true); } + @Test + 
void baseClientBuilderClassWithEnvBearerToken_sra() { + validateBaseClientBuilderClassGeneration(envBearerTokenServiceModels(), + "test-env-bearer-token-client-builder-class.java", + true); + } + @Test void baseClientBuilderClassWithNoAuthOperation_sra() { validateBaseClientBuilderClassGeneration(operationWithNoAuth(), "test-no-auth-ops-client-builder-class.java", true); diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java new file mode 100644 index 000000000000..3946483248a6 --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/EnvironmentTokenSystemSettingsClassTest.java @@ -0,0 +1,31 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.poet.client; + +import static org.hamcrest.MatcherAssert.assertThat; +import static software.amazon.awssdk.codegen.poet.PoetMatchers.generatesTo; + +import org.junit.Test; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.ClientTestModels; + +public class EnvironmentTokenSystemSettingsClassTest { + @Test + public void testEnvironmentTokenSystemSettingsClass() { + ClassSpec classSpec = new EnvironmentTokenSystemSettingsClass(ClientTestModels.restJsonServiceModels()); + assertThat(classSpec, generatesTo("test-environment-token-system-settings-class.java")); + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/ServiceVersionInfoSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/ServiceVersionInfoSpecTest.java new file mode 100644 index 000000000000..4fe4c9daf0bb --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/client/ServiceVersionInfoSpecTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.codegen.poet.client; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.hamcrest.Matchers.equalToIgnoringWhiteSpace; + +import com.squareup.javapoet.JavaFile; +import com.squareup.javapoet.TypeSpec; +import java.io.InputStream; +import java.util.Scanner; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.poet.ClassSpec; +import software.amazon.awssdk.codegen.poet.ClientTestModels; +import software.amazon.awssdk.codegen.poet.client.specs.ServiceVersionInfoSpec; +import software.amazon.awssdk.core.util.VersionInfo; + +public class ServiceVersionInfoSpecTest { + + // Fixture test that compares generated ServiceVersionInfo class against expected output. + // The fixture file uses {{VERSION}} as a placeholder for the SDK version. The placeholder get + // replaced with actual value at test time, since the generated code injects the actual + // version at build time. + @Test + void testServiceVersionInfoClass() { + String currVersion = VersionInfo.SDK_VERSION; + ClassSpec serviceVersionInfoSpec = new ServiceVersionInfoSpec(ClientTestModels.restJsonServiceModels()); + + String expectedContent = loadFixtureFile("test-service-version-info-class.java"); + expectedContent = expectedContent + .replace("{{VERSION}}", currVersion); + + String actualContent = generateContent(serviceVersionInfoSpec); + + assertThat(actualContent).isEqualToIgnoringWhitespace(expectedContent); + } + + private String loadFixtureFile(String filename) { + InputStream is = getClass().getResourceAsStream("specs/" + filename); + return new Scanner(is).useDelimiter("\\A").next(); + } + + private String generateContent(ClassSpec spec) { + TypeSpec typeSpec = spec.poetSpec(); + JavaFile javaFile = JavaFile.builder(spec.className().packageName(), typeSpec).build(); + return javaFile.toString(); + } +} diff --git 
a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/rules/EndpointProviderCompiledRulesClassSpecTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/rules/EndpointProviderCompiledRulesClassSpecTest.java index 3ee5c8757a37..5bc9a1f74d75 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/poet/rules/EndpointProviderCompiledRulesClassSpecTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/poet/rules/EndpointProviderCompiledRulesClassSpecTest.java @@ -37,4 +37,18 @@ void knowPropertiesOverride() { new EndpointProviderSpec2(ClientTestModels.queryServiceModelsWithOverrideKnowProperties()); assertThat(endpointProviderSpec, generatesTo("endpoint-provider-know-prop-override-class.java")); } + + @Test + void unknownEndpointProperties() { + ClassSpec endpointProviderSpec = + new EndpointProviderSpec2(ClientTestModels.queryServiceModelsWithUnknownEndpointProperties()); + assertThat(endpointProviderSpec, generatesTo("endpoint-provider-unknown-property-class.java")); + } + + @Test + void endpointProviderClassWithUriCache() { + ClassSpec endpointProviderSpec = + new EndpointProviderSpec2(ClientTestModels.queryServiceModelsWithUriCache()); + assertThat(endpointProviderSpec, generatesTo("endpoint-provider-uri-cache-class.java")); + } } diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java index 66e2311978ee..f93f0172fbc9 100644 --- a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/AuthUtilsTest.java @@ -18,6 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -30,15 +31,17 @@ import software.amazon.awssdk.codegen.model.intermediate.Metadata; import 
software.amazon.awssdk.codegen.model.intermediate.OperationModel; import software.amazon.awssdk.codegen.model.service.AuthType; +import software.amazon.awssdk.utils.CollectionUtils; public class AuthUtilsTest { @ParameterizedTest @MethodSource("serviceValues") public void testIfServiceHasBearerAuth(AuthType serviceAuthType, + List serviceAuthTypes, List opAuthTypes, Boolean expectedResult) { - IntermediateModel model = modelWith(serviceAuthType); + IntermediateModel model = modelWith(serviceAuthType, serviceAuthTypes); model.setOperations(createOperations(opAuthTypes)); assertThat(AuthUtils.usesBearerAuth(model)).isEqualTo(expectedResult); } @@ -47,10 +50,11 @@ private static Stream serviceValues() { List oneBearerOp = Arrays.asList(AuthType.BEARER, AuthType.S3V4, AuthType.NONE); List noBearerOp = Arrays.asList(AuthType.S3V4, AuthType.S3V4, AuthType.NONE); - return Stream.of(Arguments.of(AuthType.BEARER, noBearerOp, true), - Arguments.of(AuthType.BEARER, oneBearerOp, true), - Arguments.of(AuthType.S3V4, noBearerOp, false), - Arguments.of(AuthType.S3V4, oneBearerOp, true)); + return Stream.of(Arguments.of(AuthType.BEARER, Collections.emptyList(), noBearerOp, true), + Arguments.of(AuthType.BEARER, Collections.emptyList(), oneBearerOp, true), + Arguments.of(AuthType.S3V4, Collections.emptyList(), noBearerOp, false), + Arguments.of(AuthType.S3V4, Collections.emptyList(), oneBearerOp, true), + Arguments.of(AuthType.S3V4, oneBearerOp, noBearerOp, true)); } @ParameterizedTest @@ -106,6 +110,12 @@ private static IntermediateModel modelWith(AuthType authType) { return model; } + private static IntermediateModel modelWith(AuthType authType, List authTypes) { + IntermediateModel model = modelWith(authType); + model.getMetadata().setAuth(authTypes); + return model; + } + private static Map createOperations(List opAuthTypes) { return IntStream.range(0, opAuthTypes.size()) .boxed() diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java 
b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java new file mode 100644 index 000000000000..9d714de98e4d --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/utils/ProtocolUtilsTest.java @@ -0,0 +1,79 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.utils; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; +import org.junit.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.codegen.model.service.ServiceMetadata; + +public class ProtocolUtilsTest { + + @ParameterizedTest + @MethodSource("protocolsValues") + public void protocolSelection(List protocols, String expectedProtocol) { + ServiceMetadata serviceMetadata = serviceMetadata(protocols); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo(expectedProtocol); + } + + @Test + public void emptyProtocolsWithPresentProtocol() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocol("json"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("json"); + } + + @Test 
+ public void protocolsWithJson_protocolCborV2_selectsJson() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(Collections.singletonList("json")); + serviceMetadata.setProtocol("smithy-rpc-v2-cbor"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("json"); + } + + @Test + public void protocolsWithCborV1_protocolJson_selectsCborV1() { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(Collections.singletonList("cbor")); + serviceMetadata.setProtocol("json"); + String selectedProtocol = ProtocolUtils.resolveProtocol(serviceMetadata); + assertThat(selectedProtocol).isEqualTo("cbor"); + } + + private static Stream protocolsValues() { + return Stream.of(Arguments.of(Arrays.asList("smithy-rpc-v2-cbor", "json"), "smithy-rpc-v2-cbor"), + Arguments.of(Collections.singletonList("smithy-rpc-v2-cbor"), "smithy-rpc-v2-cbor"), + Arguments.of(Arrays.asList("smithy-rpc-v2-cbor", "json", "query"), "smithy-rpc-v2-cbor"), + Arguments.of(Arrays.asList("json", "query"), "json"), + Arguments.of(Collections.singletonList("query"), "query")); + } + + private static ServiceMetadata serviceMetadata(List protocols) { + ServiceMetadata serviceMetadata = new ServiceMetadata(); + serviceMetadata.setProtocols(protocols); + return serviceMetadata; + } +} diff --git a/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java b/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java new file mode 100644 index 000000000000..a485956d94bc --- /dev/null +++ b/codegen/src/test/java/software/amazon/awssdk/codegen/validation/SharedModelsValidatorTest.java @@ -0,0 +1,148 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.codegen.validation; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; +import software.amazon.awssdk.codegen.model.intermediate.MemberModel; +import software.amazon.awssdk.codegen.model.intermediate.Metadata; +import software.amazon.awssdk.codegen.model.intermediate.ShapeModel; +import software.amazon.awssdk.codegen.poet.ClientTestModels; + +public class SharedModelsValidatorTest { + private final ModelValidator validator = new SharedModelsValidator(); + + @Test + void validateModels_noTargetService_noValidationErrors() { + assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), null)).isEmpty(); + } + + @Test + void validateModels_targetServiceTriviallyIdentical_noValidationErrors() { + assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), ClientTestModels.awsJsonServiceModels())).isEmpty(); + } + + @Test + void validateModels_noSharedShapes_noValidationErrors() { + IntermediateModel target = ClientTestModels.awsJsonServiceModels(); + Map renamedShapes = target.getShapes() + .entrySet() + .stream() + .collect(Collectors.toMap(e -> "Copy" + e.getKey(), Map.Entry::getValue)); + target.setShapes(renamedShapes); + + assertThat(runValidation(ClientTestModels.awsJsonServiceModels(), target)).isEmpty(); + } + + @Test + void 
validateModels_sharedShapesNotIdentical_emitsValidationError() { + IntermediateModel target = ClientTestModels.awsJsonServiceModels(); + Map modifiedShapes = target.getShapes() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, + e -> { + ShapeModel shapeModel = e.getValue(); + shapeModel.setDeprecated(!shapeModel.isDeprecated()); + return shapeModel; + })); + + target.setShapes(modifiedShapes); + + List validationEntries = runValidation(ClientTestModels.awsJsonServiceModels(), target); + + assertThat(validationEntries).hasSize(modifiedShapes.size()); + + assertThat(validationEntries).allMatch(e -> e.getErrorId() == ValidationErrorId.SHARED_MODELS_DIFFER + && e.getSeverity() == ValidationErrorSeverity.DANGER); + } + + @Test + void validateModels_shapesDontHaveSameMemberNames_emitsValidationError() { + IntermediateModel fooService = new IntermediateModel(); + fooService.setMetadata(new Metadata().withServiceName("Foo")); + + IntermediateModel barService = new IntermediateModel(); + barService.setMetadata(new Metadata().withServiceName("Bar")); + + String shapeName = "TestShape"; + + ShapeModel shape1 = new ShapeModel(); + MemberModel member1 = new MemberModel(); + member1.setName("Shape1Member"); + shape1.setMembers(Arrays.asList(member1)); + + ShapeModel shape2 = new ShapeModel(); + MemberModel member2 = new MemberModel(); + member2.setName("Shape2Member"); + shape2.setMembers(Arrays.asList(member2)); + + Map fooServiceShapes = new HashMap<>(); + fooServiceShapes.put(shapeName, shape1); + fooService.setShapes(fooServiceShapes); + + Map barServiceShapes = new HashMap<>(); + barServiceShapes.put(shapeName, shape2); + barService.setShapes(barServiceShapes); + + List validationEntries = runValidation(fooService, barService); + + assertThat(validationEntries).hasSize(1); + } + + @Test + void validateModels_shapesDontHaveSameMembers_emitsValidationError() { + IntermediateModel fooService = new IntermediateModel(); + fooService.setMetadata(new 
Metadata().withServiceName("Foo")); + + IntermediateModel barService = new IntermediateModel(); + barService.setMetadata(new Metadata().withServiceName("Bar")); + + String shapeName = "TestShape"; + ShapeModel shape1 = new ShapeModel(); + + ShapeModel shape2 = new ShapeModel(); + shape2.setMembers(Arrays.asList(new MemberModel(), new MemberModel())); + + Map fooServiceShapes = new HashMap<>(); + fooServiceShapes.put(shapeName, shape1); + fooService.setShapes(fooServiceShapes); + + Map barServiceShapes = new HashMap<>(); + barServiceShapes.put(shapeName, shape2); + barService.setShapes(barServiceShapes); + + List validationEntries = runValidation(fooService, barService); + + assertThat(validationEntries).hasSize(1); + } + + private List runValidation(IntermediateModel m1, IntermediateModel m2) { + ModelValidationContext ctx = ModelValidationContext.builder() + .intermediateModel(m1) + .shareModelsTarget(m2) + .build(); + + return validator.validateModels(ctx); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json new file mode 100644 index 000000000000..861ba12cf3c5 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/incorrect-endpoint-tests.json @@ -0,0 +1,26 @@ +{ + "testCases": [ + { + "documentation": "Test references undefined operation member", + "expect": { + "error": "Some error" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1" + }, + "operationName": "APostOperation", + "operationParams": { + "Foo": "bar" + } + } + ], + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + } + ] +} \ No newline at end of file diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json new file mode 
100644 index 000000000000..d7caffad37e0 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/no-request-uri-operation-service.json @@ -0,0 +1,43 @@ +{ + "version": "2.0", + "metadata": { + "apiVersion": "2010-05-08", + "endpointPrefix": "json-service-endpoint", + "globalEndpoint": "json-service.amazonaws.com", + "protocol": "rest-json", + "serviceAbbreviation": "Rest Json Service", + "serviceFullName": "Some Service That Uses Rest-Json Protocol", + "serviceId": "Rest Json Service", + "signingName": "json-service", + "signatureVersion": "v4", + "uid": "json-service-2010-05-08", + "xmlNamespace": "https://json-service.amazonaws.com/doc/2010-05-08/" + }, + "operations": { + "OperationWithUriMappedParam": { + "name": "OperationWithUriMappedParam", + "http": { + "method": "GET" + }, + "input": { + "shape": "OperationWithUriMappedParamRequest" + } + } + }, + "shapes": { + "OperationWithUriMappedParamRequest": { + "type": "structure", + "members": { + "StringMember": { + "shape": "String", + "location": "uri", + "locationName": "stringMember" + } + } + }, + "String": { + "type": "string" + } + }, + "documentation": "A service that is implemented using the rest-json protocol" +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java new file mode 100644 index 000000000000..a7f8a8e8d336 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/env-bearer-token-auth-scheme-interceptor.java @@ -0,0 +1,164 @@ +package software.amazon.awssdk.services.json.auth.scheme.internal; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import 
software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkExecutionAttribute; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.internal.util.MetricUtils; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.Identity; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.ResolveIdentityRequest; +import software.amazon.awssdk.identity.spi.TokenIdentity; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.metrics.SdkMetric; +import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeParams; +import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class JsonAuthSchemeInterceptor implements ExecutionInterceptor { + private static Logger LOG = 
Logger.loggerFor(JsonAuthSchemeInterceptor.class); + + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + List authOptions = resolveAuthOptions(context, executionAttributes); + SelectedAuthScheme selectedAuthScheme = selectAuthScheme(authOptions, executionAttributes); + putSelectedAuthScheme(executionAttributes, selectedAuthScheme); + recordEnvironmentTokenBusinessMetric(selectedAuthScheme, executionAttributes); + } + + private List resolveAuthOptions(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + JsonAuthSchemeProvider authSchemeProvider = Validate.isInstanceOf(JsonAuthSchemeProvider.class, + executionAttributes.getAttribute(SdkInternalExecutionAttribute.AUTH_SCHEME_RESOLVER), + "Expected an instance of JsonAuthSchemeProvider"); + JsonAuthSchemeParams params = authSchemeParams(context.request(), executionAttributes); + return authSchemeProvider.resolveAuthScheme(params); + } + + private SelectedAuthScheme selectAuthScheme(List authOptions, + ExecutionAttributes executionAttributes) { + MetricCollector metricCollector = executionAttributes.getAttribute(SdkExecutionAttribute.API_CALL_METRIC_COLLECTOR); + Map> authSchemes = executionAttributes.getAttribute(SdkInternalExecutionAttribute.AUTH_SCHEMES); + IdentityProviders identityProviders = executionAttributes.getAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS); + List> discardedReasons = new ArrayList<>(); + for (AuthSchemeOption authOption : authOptions) { + AuthScheme authScheme = authSchemes.get(authOption.schemeId()); + SelectedAuthScheme selectedAuthScheme = trySelectAuthScheme(authOption, authScheme, + identityProviders, discardedReasons, metricCollector, executionAttributes); + if (selectedAuthScheme != null) { + if (!discardedReasons.isEmpty()) { + LOG.debug(() -> String.format("%s auth will be used, discarded: '%s'", authOption.schemeId(), + 
discardedReasons.stream().map(Supplier::get).collect(Collectors.joining(", ")))); + } + return selectedAuthScheme; + } + } + throw SdkException + .builder() + .message( + "Failed to determine how to authenticate the user: " + + discardedReasons.stream().map(Supplier::get).collect(Collectors.joining(", "))).build(); + } + + private JsonAuthSchemeParams authSchemeParams(SdkRequest request, ExecutionAttributes executionAttributes) { + String operation = executionAttributes.getAttribute(SdkExecutionAttribute.OPERATION_NAME); + JsonAuthSchemeParams.Builder builder = JsonAuthSchemeParams.builder().operation(operation); + return builder.build(); + } + + private SelectedAuthScheme trySelectAuthScheme(AuthSchemeOption authOption, AuthScheme authScheme, + IdentityProviders identityProviders, List> discardedReasons, MetricCollector metricCollector, + ExecutionAttributes executionAttributes) { + if (authScheme == null) { + discardedReasons.add(() -> String.format("'%s' is not enabled for this request.", authOption.schemeId())); + return null; + } + IdentityProvider identityProvider = authScheme.identityProvider(identityProviders); + if (identityProvider == null) { + discardedReasons + .add(() -> String.format("'%s' does not have an identity provider configured.", authOption.schemeId())); + return null; + } + HttpSigner signer; + try { + signer = authScheme.signer(); + } catch (RuntimeException e) { + discardedReasons.add(() -> String.format("'%s' signer could not be retrieved: %s", authOption.schemeId(), + e.getMessage())); + return null; + } + ResolveIdentityRequest.Builder identityRequestBuilder = ResolveIdentityRequest.builder(); + authOption.forEachIdentityProperty(identityRequestBuilder::putProperty); + CompletableFuture identity; + SdkMetric metric = getIdentityMetric(identityProvider); + if (metric == null) { + identity = identityProvider.resolveIdentity(identityRequestBuilder.build()); + } else { + identity = MetricUtils.reportDuration(() -> 
identityProvider.resolveIdentity(identityRequestBuilder.build()), + metricCollector, metric); + } + return new SelectedAuthScheme<>(identity, signer, authOption); + } + + private SdkMetric getIdentityMetric(IdentityProvider identityProvider) { + Class identityType = identityProvider.identityType(); + if (identityType == AwsCredentialsIdentity.class) { + return CoreMetric.CREDENTIALS_FETCH_DURATION; + } + if (identityType == TokenIdentity.class) { + return CoreMetric.TOKEN_FETCH_DURATION; + } + return null; + } + + private void putSelectedAuthScheme(ExecutionAttributes attributes, + SelectedAuthScheme selectedAuthScheme) { + SelectedAuthScheme existingAuthScheme = attributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + if (existingAuthScheme != null) { + AuthSchemeOption.Builder selectedOption = selectedAuthScheme.authSchemeOption().toBuilder(); + existingAuthScheme.authSchemeOption().forEachIdentityProperty(selectedOption::putIdentityPropertyIfAbsent); + existingAuthScheme.authSchemeOption().forEachSignerProperty(selectedOption::putSignerPropertyIfAbsent); + selectedAuthScheme = new SelectedAuthScheme<>(selectedAuthScheme.identity(), selectedAuthScheme.signer(), + selectedOption.build()); + } + attributes.putAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME, selectedAuthScheme); + } + + private void recordEnvironmentTokenBusinessMetric(SelectedAuthScheme selectedAuthScheme, + ExecutionAttributes executionAttributes) { + String tokenFromEnv = executionAttributes.getAttribute(SdkInternalExecutionAttribute.TOKEN_CONFIGURED_FROM_ENV); + if (selectedAuthScheme != null && selectedAuthScheme.authSchemeOption().schemeId().equals(BearerAuthScheme.SCHEME_ID) + && selectedAuthScheme.identity().isDone()) { + if (selectedAuthScheme.identity().getNow(null) instanceof TokenIdentity) { + TokenIdentity configuredToken = (TokenIdentity) selectedAuthScheme.identity().getNow(null); + if (configuredToken.token().equals(tokenFromEnv)) { + 
executionAttributes.getAttribute(SdkInternalExecutionAttribute.BUSINESS_METRICS).addMetric( + BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS.value()); + } + } + } + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java new file mode 100644 index 000000000000..279142374e6b --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-preferred-provider.java @@ -0,0 +1,51 @@ +package software.amazon.awssdk.services.query.auth.scheme.internal; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeParams; +import software.amazon.awssdk.services.query.auth.scheme.QueryAuthSchemeProvider; +import software.amazon.awssdk.utils.CollectionUtils; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class PreferredQueryAuthSchemeProvider implements QueryAuthSchemeProvider { + private final QueryAuthSchemeProvider delegate; + + private final List authSchemePreference; + + public PreferredQueryAuthSchemeProvider(QueryAuthSchemeProvider delegate, List authSchemePreference) { + this.delegate = delegate; + this.authSchemePreference = authSchemePreference != null ? authSchemePreference : Collections.emptyList(); + } + + /** + * Resolve the auth schemes based on the given set of parameters. 
+ */ + @Override + public List resolveAuthScheme(QueryAuthSchemeParams params) { + List candidateAuthSchemes = delegate.resolveAuthScheme(params); + if (CollectionUtils.isNullOrEmpty(authSchemePreference)) { + return candidateAuthSchemes; + } + List authSchemes = new ArrayList<>(); + authSchemePreference.forEach(preferredSchemeId -> { + candidateAuthSchemes + .stream() + .filter(candidate -> { + String candidateSchemeName = candidate.schemeId().contains("#") ? candidate.schemeId().split("#")[1] + : candidate.schemeId(); + return candidateSchemeName.equals(preferredSchemeId); + }).findFirst().ifPresent(authSchemes::add); + }); + candidateAuthSchemes.forEach(candidate -> { + if (!authSchemes.contains(candidate)) { + authSchemes.add(candidate); + } + }); + return authSchemes; + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java index a4f84dc2665a..cdbc7c4c24d0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-auth-scheme-provider.java @@ -1,18 +1,3 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - package software.amazon.awssdk.services.query.auth.scheme; import java.util.List; @@ -22,6 +7,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.services.query.auth.scheme.internal.DefaultQueryAuthSchemeProvider; +import software.amazon.awssdk.services.query.auth.scheme.internal.PreferredQueryAuthSchemeProvider; /** * An auth scheme provider for Query service. The auth scheme provider takes a set of parameters using @@ -50,4 +36,11 @@ default List resolveAuthScheme(Consumer authSchemePreference) { + return new PreferredQueryAuthSchemeProvider(defaultProvider(), authSchemePreference); + } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java index a4f84dc2665a..cdbc7c4c24d0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/auth/scheme/query-endpoint-auth-params-auth-scheme-provider.java @@ -1,18 +1,3 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - package software.amazon.awssdk.services.query.auth.scheme; import java.util.List; @@ -22,6 +7,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.services.query.auth.scheme.internal.DefaultQueryAuthSchemeProvider; +import software.amazon.awssdk.services.query.auth.scheme.internal.PreferredQueryAuthSchemeProvider; /** * An auth scheme provider for Query service. The auth scheme provider takes a set of parameters using @@ -50,4 +36,11 @@ default List resolveAuthScheme(Consumer authSchemePreference) { + return new PreferredQueryAuthSchemeProvider(defaultProvider(), authSchemePreference); + } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java index 4dee534fba41..ee8f9a73d3e5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-bearer-auth-client-builder-class.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -59,14 +60,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - 
.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -77,7 +79,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -93,21 +95,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - 
.serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -126,7 +128,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = 
AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } @@ -196,6 +205,6 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java index 83b6266466fd..a0bdac67d04d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-class.java @@ -12,6 +12,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -70,15 +71,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - 
.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -89,82 +91,82 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); 
ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); if (serviceConfigBuilder.dualstackEnabled() != null) { Validate.validState( - config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, - "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); + config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, + "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); } else { serviceConfigBuilder.dualstackEnabled(config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.fipsModeEnabled() != null) { Validate.validState( - config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, - "Fips has been configured on both ServiceConfiguration and the client/global level. Please limit fips configuration to one location."); + config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, + "Fips has been configured on both ServiceConfiguration and the client/global level. 
Please limit fips configuration to one location."); } else { serviceConfigBuilder.fipsModeEnabled(config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.useArnRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, - "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); + clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, + "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); } else { serviceConfigBuilder.useArnRegionEnabled(clientContextParams.get(JsonClientContextParams.USE_ARN_REGION)); } if (serviceConfigBuilder.multiRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, - "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); + clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, + "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. 
Please limit DisableMultiRegionAccessPoints configuration to one location."); } else if (clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) != null) { serviceConfigBuilder.multiRegionEnabled(!clientContextParams - .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); + .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); } if (serviceConfigBuilder.pathStyleAccessEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, - "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); + clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, + "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); } else { serviceConfigBuilder.pathStyleAccessEnabled(clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE)); } if (serviceConfigBuilder.accelerateModeEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, - "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); + clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, + "Accelerate has been configured on both ServiceConfiguration and the client/global level. 
Please limit Accelerate configuration to one location."); } else { serviceConfigBuilder.accelerateModeEnabled(clientContextParams.get(JsonClientContextParams.ACCELERATE)); } Boolean checksumValidationEnabled = serviceConfigBuilder.checksumValidationEnabled(); if (checksumValidationEnabled != null) { Validate.validState( - config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); Validate.validState( - config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. 
Please limit checksum behavior configuration to one location."); if (checksumValidationEnabled) { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); } else { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); } } ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); clientContextParams.put(JsonClientContextParams.USE_ARN_REGION, finalServiceConfig.useArnRegionEnabled()); clientContextParams.put(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS, - !finalServiceConfig.multiRegionEnabled()); + !finalServiceConfig.multiRegionEnabled()); clientContextParams.put(JsonClientContextParams.FORCE_PATH_STYLE, finalServiceConfig.pathStyleAccessEnabled()); clientContextParams.put(JsonClientContextParams.ACCELERATE, finalServiceConfig.accelerateModeEnabled()); SdkClientConfiguration.Builder builder = config.toBuilder(); @@ -189,21 +191,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon } builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") 
- .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -225,7 +227,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { 
+ AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } @@ -327,9 +336,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -338,15 +347,15 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + 
.defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java index 80511b9556ce..360d3664eaad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-endpoints-auth-params.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -68,14 +69,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> 
TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -143,7 +145,14 @@ public B authSchemeProvider(QueryAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private QueryAuthSchemeProvider defaultAuthSchemeProvider() { + private QueryAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return QueryAuthSchemeProvider.defaultProvider(preferences); + } return QueryAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java index e7a2428380aa..9b143b9ccd69 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-client-builder-internal-defaults-class.java @@ -8,6 +8,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -55,10 +56,12 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -77,7 +80,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = 
CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -93,21 +96,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); 
return builder.build(); } @@ -126,7 +129,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java index 6baf26fa580e..117e19038881 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-composed-sync-default-client-builder.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -65,15 +66,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - 
return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -84,17 +86,17 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, 
config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { @@ -112,21 +114,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - 
.fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -148,7 +150,14 @@ public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private JsonAuthSchemeProvider defaultAuthSchemeProvider() { + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } return JsonAuthSchemeProvider.defaultProvider(); } @@ -246,9 +255,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo 
RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -257,15 +266,15 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java new file mode 100644 index 000000000000..48ecf08535fa --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-env-bearer-token-client-builder-class.java @@ -0,0 +1,227 @@ +package software.amazon.awssdk.services.json; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.TokenUtils; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; +import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; +import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; +import software.amazon.awssdk.awscore.client.config.AwsClientOption; +import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; +import software.amazon.awssdk.awscore.retry.AwsRetryStrategy; +import software.amazon.awssdk.core.SdkPlugin; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientConfiguration; +import software.amazon.awssdk.core.client.config.SdkClientOption; +import software.amazon.awssdk.core.interceptor.ClasspathInterceptorChainFactory; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.core.retry.RetryMode; +import software.amazon.awssdk.http.auth.scheme.BearerAuthScheme; +import 
software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; +import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption; +import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider; +import software.amazon.awssdk.services.json.auth.scheme.internal.JsonAuthSchemeInterceptor; +import software.amazon.awssdk.services.json.endpoints.JsonEndpointProvider; +import software.amazon.awssdk.services.json.endpoints.internal.JsonRequestSetEndpointInterceptor; +import software.amazon.awssdk.services.json.endpoints.internal.JsonResolveEndpointInterceptor; +import software.amazon.awssdk.services.json.internal.EnvironmentTokenSystemSettings; +import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * Internal base class for {@link DefaultJsonClientBuilder} and {@link DefaultJsonAsyncClientBuilder}. 
+ */ +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +abstract class DefaultJsonBaseClientBuilder, C> extends AwsDefaultClientBuilder { + private final Map> additionalAuthSchemes = new HashMap<>(); + + @Override + protected final String serviceEndpointPrefix() { + return "json-service-endpoint"; + } + + @Override + protected final String serviceName() { + return "Json"; + } + + @Override + protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + Optional tokenFromEnv = new EnvironmentTokenSystemSettings().getStringValue(); + if (tokenFromEnv.isPresent() && config.option(SdkClientOption.AUTH_SCHEME_PROVIDER) == null + && config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER) == null) { + c.option(SdkClientOption.AUTH_SCHEME_PROVIDER, + JsonAuthSchemeProvider.defaultProvider(Collections.singletonList("httpBearerAuth"))); + c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, StaticTokenProvider.create(tokenFromEnv::get)); + c.option( + SdkClientOption.EXECUTION_ATTRIBUTES, + ExecutionAttributes.builder() + .put(SdkInternalExecutionAttribute.TOKEN_CONFIGURED_FROM_ENV, tokenFromEnv.get()).build()); + } else { + c.option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)); + } + }); + } + + @Override + protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientConfiguration config) { + List endpointInterceptors = new ArrayList<>(); + endpointInterceptors.add(new JsonAuthSchemeInterceptor()); + endpointInterceptors.add(new JsonResolveEndpointInterceptor()); 
+ endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); + ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); + List interceptors = interceptorFactory + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + List additionalInterceptors = new ArrayList<>(); + interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); + interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); + interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + SdkClientConfiguration.Builder builder = config.toBuilder(); + builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { + IdentityProviders.Builder result = IdentityProviders.builder(); + IdentityProvider tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + if (tokenIdentityProvider != null) { + result.putIdentityProvider(tokenIdentityProvider); + } + return result.build(); + }); + builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); + builder.lazyOptionIfAbsent( + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + 
builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); + return builder.build(); + } + + @Override + protected final String signingName() { + return "json-service"; + } + + private JsonEndpointProvider defaultEndpointProvider() { + return JsonEndpointProvider.defaultProvider(); + } + + public B authSchemeProvider(JsonAuthSchemeProvider authSchemeProvider) { + clientConfiguration.option(SdkClientOption.AUTH_SCHEME_PROVIDER, authSchemeProvider); + return thisBuilder(); + } + + private JsonAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return JsonAuthSchemeProvider.defaultProvider(preferences); + } + return JsonAuthSchemeProvider.defaultProvider(); + } + + @Override + public B putAuthScheme(AuthScheme authScheme) { + additionalAuthSchemes.put(authScheme.schemeId(), authScheme); + return thisBuilder(); + } + + private Map> authSchemes() { + Map> schemes = new HashMap<>(2 + this.additionalAuthSchemes.size()); + BearerAuthScheme bearerAuthScheme = BearerAuthScheme.create(); + schemes.put(bearerAuthScheme.schemeId(), bearerAuthScheme); + NoAuthAuthScheme noAuthAuthScheme = NoAuthAuthScheme.create(); + schemes.put(noAuthAuthScheme.schemeId(), noAuthAuthScheme); + schemes.putAll(this.additionalAuthSchemes); + return schemes; + } + + private IdentityProvider defaultTokenProvider() { + return DefaultAwsTokenProvider.create(); + } + + @Override + protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { + List internalPlugins = internalPlugins(config); + List externalPlugins = plugins(); + if (internalPlugins.isEmpty() && externalPlugins.isEmpty()) 
{ + return config; + } + List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); + SdkClientConfiguration.Builder configuration = config.toBuilder(); + JsonServiceClientConfigurationBuilder serviceConfigBuilder = new JsonServiceClientConfigurationBuilder(configuration); + for (SdkPlugin plugin : plugins) { + plugin.configureClient(serviceConfigBuilder); + } + updateRetryStrategyClientConfiguration(configuration); + return configuration.build(); + } + + private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { + ClientOverrideConfiguration.Builder builder = configuration.asOverrideConfigurationBuilder(); + RetryMode retryMode = builder.retryMode(); + if (retryMode != null) { + configuration.option(SdkClientOption.RETRY_STRATEGY, AwsRetryStrategy.forRetryMode(retryMode)); + } else { + Consumer> configurator = builder.retryStrategyConfigurator(); + if (configurator != null) { + RetryStrategy.Builder defaultBuilder = AwsRetryStrategy.defaultRetryStrategy().toBuilder(); + configurator.accept(defaultBuilder); + configuration.option(SdkClientOption.RETRY_STRATEGY, defaultBuilder.build()); + } else { + RetryStrategy retryStrategy = builder.retryStrategy(); + if (retryStrategy != null) { + configuration.option(SdkClientOption.RETRY_STRATEGY, retryStrategy); + } + } + } + configuration.option(SdkClientOption.CONFIGURED_RETRY_MODE, null); + configuration.option(SdkClientOption.CONFIGURED_RETRY_STRATEGY, null); + configuration.option(SdkClientOption.CONFIGURED_RETRY_CONFIGURATOR, null); + } + + private List internalPlugins(SdkClientConfiguration config) { + return Collections.emptyList(); + } + + protected static void validateClientOptions(SdkClientConfiguration c) { + Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), + "The 'tokenProvider' must be configured in the client builder."); + } +} diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-multi-auth-sigv4a-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-multi-auth-sigv4a-client-builder-class.java index 1e5d47f1235c..75faf2cad7a8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-multi-auth-sigv4a-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-multi-auth-sigv4a-client-builder-class.java @@ -8,6 +8,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -43,7 +44,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder, C> extends - AwsDefaultClientBuilder { + AwsDefaultClientBuilder { private final Map> additionalAuthSchemes = new HashMap<>(); @Override @@ -58,10 +59,12 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + 
.option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -72,7 +75,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -88,21 +91,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + 
.serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -121,7 +124,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return DatabaseAuthSchemeProvider.defaultProvider(); } @@ -153,7 +163,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { 
plugin.configureClient(serviceConfigBuilder); } @@ -193,7 +203,7 @@ protected static void validateClientOptions(SdkClientConfiguration c) { public B sigv4aSigningRegionSet(RegionSet sigv4aSigningRegionSet) { clientConfiguration.option(AwsClientOption.AWS_SIGV4A_SIGNING_REGION_SET, - sigv4aSigningRegionSet == null ? Collections.emptySet() : sigv4aSigningRegionSet.asSet()); + sigv4aSigningRegionSet == null ? Collections.emptySet() : sigv4aSigningRegionSet.asSet()); return thisBuilder(); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java index 74e0b6ce2709..72d4f526bfb3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-ops-client-builder-class.java @@ -8,6 +8,9 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.TokenUtils; +import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -25,6 +28,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.identity.spi.IdentityProvider; import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption; 
import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption; import software.amazon.awssdk.retries.api.RetryStrategy; @@ -35,6 +39,7 @@ import software.amazon.awssdk.services.database.endpoints.internal.DatabaseResolveEndpointInterceptor; import software.amazon.awssdk.services.database.internal.DatabaseServiceClientConfigurationBuilder; import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.Validate; /** * Internal base class for {@link DefaultDatabaseClientBuilder} and {@link DefaultDatabaseAsyncClientBuilder}. @@ -42,7 +47,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder, C> extends - AwsDefaultClientBuilder { + AwsDefaultClientBuilder { private final Map> additionalAuthSchemes = new HashMap<>(); @Override @@ -57,10 +62,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -71,7 +81,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); 
ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -79,6 +89,10 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { IdentityProviders.Builder result = IdentityProviders.builder(); + IdentityProvider tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + if (tokenIdentityProvider != null) { + result.putIdentityProvider(tokenIdentityProvider); + } IdentityProvider credentialsIdentityProvider = c.get(AwsClientOption.CREDENTIALS_IDENTITY_PROVIDER); if (credentialsIdentityProvider != null) { result.putIdentityProvider(credentialsIdentityProvider); @@ -87,21 +101,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - 
.putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -120,7 +134,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return 
DatabaseAuthSchemeProvider.defaultProvider(); } @@ -142,6 +163,10 @@ private Map> authSchemes() { return schemes; } + private IdentityProvider defaultTokenProvider() { + return DefaultAwsTokenProvider.create(); + } + @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List internalPlugins = internalPlugins(config); @@ -152,7 +177,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -188,5 +213,7 @@ private List internalPlugins(SdkClientConfiguration config) { } protected static void validateClientOptions(SdkClientConfiguration c) { + Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java index 8a42dcba4138..0be9c031d828 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-no-auth-service-client-builder-class.java @@ -8,6 +8,7 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; 
import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -39,7 +40,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder, C> extends - AwsDefaultClientBuilder { + AwsDefaultClientBuilder { private final Map> additionalAuthSchemes = new HashMap<>(); @Override @@ -54,10 +55,12 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -68,7 +71,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -80,21 +83,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); 
builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -113,7 +116,14 @@ public B authSchemeProvider(DatabaseAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } 
- private DatabaseAuthSchemeProvider defaultAuthSchemeProvider() { + private DatabaseAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return DatabaseAuthSchemeProvider.defaultProvider(preferences); + } return DatabaseAuthSchemeProvider.defaultProvider(); } @@ -141,7 +151,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java index 724eb838439d..19b8d5abbae1 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/sra/test-query-client-builder-class.java @@ -10,6 +10,7 @@ import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; 
import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -66,14 +67,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider()) - .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.AUTH_SCHEME_PROVIDER, defaultAuthSchemeProvider(config)) + .option(SdkClientOption.AUTH_SCHEMES, authSchemes()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()); + }); } @Override @@ -141,7 +143,14 @@ public B authSchemeProvider(QueryAuthSchemeProvider authSchemeProvider) { return thisBuilder(); } - private QueryAuthSchemeProvider defaultAuthSchemeProvider() { + private QueryAuthSchemeProvider defaultAuthSchemeProvider(SdkClientConfiguration config) { + AuthSchemePreferenceResolver authSchemePreferenceProvider = AuthSchemePreferenceResolver.builder() + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)).build(); + List preferences = authSchemePreferenceProvider.resolveAuthSchemePreference(); + if (!preferences.isEmpty()) { + return 
QueryAuthSchemeProvider.defaultProvider(preferences); + } return QueryAuthSchemeProvider.defaultProvider(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java index 9895ae765031..c7932ab9f7c6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-bearer-auth-client-builder-class.java @@ -53,13 +53,14 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -69,7 +70,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = 
interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -85,21 +86,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + 
.putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -167,8 +168,8 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java index 6013da8b2ea5..2eca7dfdcf2c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-class.java @@ -64,15 +64,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, 
ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -82,82 +83,82 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); 
serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); if (serviceConfigBuilder.dualstackEnabled() != null) { Validate.validState( - config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, - "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); + config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED) == null, + "Dualstack has been configured on both ServiceConfiguration and the client/global level. Please limit dualstack configuration to one location."); } else { serviceConfigBuilder.dualstackEnabled(config.option(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.fipsModeEnabled() != null) { Validate.validState( - config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, - "Fips has been configured on both ServiceConfiguration and the client/global level. Please limit fips configuration to one location."); + config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED) == null, + "Fips has been configured on both ServiceConfiguration and the client/global level. Please limit fips configuration to one location."); } else { serviceConfigBuilder.fipsModeEnabled(config.option(AwsClientOption.FIPS_ENDPOINT_ENABLED)); } if (serviceConfigBuilder.useArnRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, - "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. 
Please limit UseArnRegion configuration to one location."); + clientContextParams.get(JsonClientContextParams.USE_ARN_REGION) == null, + "UseArnRegion has been configured on both ServiceConfiguration and the client/global level. Please limit UseArnRegion configuration to one location."); } else { serviceConfigBuilder.useArnRegionEnabled(clientContextParams.get(JsonClientContextParams.USE_ARN_REGION)); } if (serviceConfigBuilder.multiRegionEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, - "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); + clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) == null, + "DisableMultiRegionAccessPoints has been configured on both ServiceConfiguration and the client/global level. Please limit DisableMultiRegionAccessPoints configuration to one location."); } else if (clientContextParams.get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS) != null) { serviceConfigBuilder.multiRegionEnabled(!clientContextParams - .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); + .get(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS)); } if (serviceConfigBuilder.pathStyleAccessEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, - "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. Please limit ForcePathStyle configuration to one location."); + clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE) == null, + "ForcePathStyle has been configured on both ServiceConfiguration and the client/global level. 
Please limit ForcePathStyle configuration to one location."); } else { serviceConfigBuilder.pathStyleAccessEnabled(clientContextParams.get(JsonClientContextParams.FORCE_PATH_STYLE)); } if (serviceConfigBuilder.accelerateModeEnabled() != null) { Validate.validState( - clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, - "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); + clientContextParams.get(JsonClientContextParams.ACCELERATE) == null, + "Accelerate has been configured on both ServiceConfiguration and the client/global level. Please limit Accelerate configuration to one location."); } else { serviceConfigBuilder.accelerateModeEnabled(clientContextParams.get(JsonClientContextParams.ACCELERATE)); } Boolean checksumValidationEnabled = serviceConfigBuilder.checksumValidationEnabled(); if (checksumValidationEnabled != null) { Validate.validState( - config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); Validate.validState( - config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, - "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. Please limit checksum behavior configuration to one location."); + config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION) == null, + "Checksum behavior has been configured on both ServiceConfiguration and the client/global level. 
Please limit checksum behavior configuration to one location."); if (checksumValidationEnabled) { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_SUPPORTED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_SUPPORTED).build(); } else { config = config.toBuilder() - .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) - .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); + .option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, RequestChecksumCalculation.WHEN_REQUIRED) + .option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION, ResponseChecksumValidation.WHEN_REQUIRED).build(); } } ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); clientContextParams.put(JsonClientContextParams.USE_ARN_REGION, finalServiceConfig.useArnRegionEnabled()); clientContextParams.put(JsonClientContextParams.DISABLE_MULTI_REGION_ACCESS_POINTS, - !finalServiceConfig.multiRegionEnabled()); + !finalServiceConfig.multiRegionEnabled()); clientContextParams.put(JsonClientContextParams.FORCE_PATH_STYLE, finalServiceConfig.pathStyleAccessEnabled()); clientContextParams.put(JsonClientContextParams.ACCELERATE, finalServiceConfig.accelerateModeEnabled()); SdkClientConfiguration.Builder builder = config.toBuilder(); @@ -182,21 +183,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon } builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") 
- .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -301,9 +302,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { 
configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -312,19 +313,19 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); 
Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java index 52c27dfcc8ac..4a8a346f1c76 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-endpoints-auth-params.java @@ -61,14 +61,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java index 12dec5b9986d..4f1e5bbd8e0d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-client-builder-internal-defaults-class.java @@ -50,9 +50,11 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -70,7 +72,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -86,21 +88,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); 
builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -164,6 +166,6 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void 
validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java index 52f9d10e821f..778b676c4975 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-composed-sync-default-client-builder.java @@ -59,15 +59,16 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .option(SdkClientOption.SERVICE_CONFIGURATION, ServiceConfiguration.builder().build()) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> 
TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -77,17 +78,17 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new JsonRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/json/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); interceptors = CollectionUtils.mergeLists(interceptors, config.option(SdkClientOption.EXECUTION_INTERCEPTORS)); ServiceConfiguration.Builder serviceConfigBuilder = ((ServiceConfiguration) config - .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); + .option(SdkClientOption.SERVICE_CONFIGURATION)).toBuilder(); serviceConfigBuilder.profileFile(serviceConfigBuilder.profileFileSupplier() != null ? serviceConfigBuilder - .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); + .profileFileSupplier() : config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)); serviceConfigBuilder.profileName(serviceConfigBuilder.profileName() != null ? 
serviceConfigBuilder.profileName() : config - .option(SdkClientOption.PROFILE_NAME)); + .option(SdkClientOption.PROFILE_NAME)); ServiceConfiguration finalServiceConfig = serviceConfigBuilder.build(); SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { @@ -105,21 +106,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.option(SdkClientOption.SERVICE_CONFIGURATION, finalServiceConfig); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") - .serviceProfileProperty("json_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_JSON_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlJson") + .serviceProfileProperty("json_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + 
.putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); SdkClientConfiguration clientConfig = config; builder.lazyOption(SdkClientOption.REQUEST_CHECKSUM_CALCULATION, c -> resolveRequestChecksumCalculation(clientConfig)); @@ -220,9 +221,9 @@ private RequestChecksumCalculation resolveRequestChecksumCalculation(SdkClientCo RequestChecksumCalculation configuredChecksumCalculation = config.option(SdkClientOption.REQUEST_CHECKSUM_CALCULATION); if (configuredChecksumCalculation == null) { configuredChecksumCalculation = RequestChecksumCalculationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumCalculation(RequestChecksumCalculation.WHEN_SUPPORTED).resolve(); } return configuredChecksumCalculation; } @@ -231,19 +232,19 @@ private ResponseChecksumValidation resolveResponseChecksumValidation(SdkClientCo ResponseChecksumValidation configuredChecksumValidation = config.option(SdkClientOption.RESPONSE_CHECKSUM_VALIDATION); if (configuredChecksumValidation == null) { configuredChecksumValidation = ResponseChecksumValidationResolver.create() - .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(config.option(SdkClientOption.PROFILE_NAME)) - .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); + .profileFile(config.option(SdkClientOption.PROFILE_FILE_SUPPLIER)) + 
.profileName(config.option(SdkClientOption.PROFILE_NAME)) + .defaultChecksumValidation(ResponseChecksumValidation.WHEN_SUPPORTED).resolve(); } return configuredChecksumValidation; } protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), - "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), - "The 'tokenProvider' must be configured in the client builder."); + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java index ddd2b3427e71..eb724ba82245 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-service-client-builder-class.java @@ -54,9 +54,11 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, 
defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -66,7 +68,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new H2RequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -82,21 +84,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2") - .serviceProfileProperty("h2_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + 
.serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2") + .serviceProfileProperty("h2_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -118,7 +120,7 @@ private H2EndpointProvider defaultEndpointProvider() { protected final AttributeMap serviceHttpConfig() { AttributeMap result = AttributeMap.empty(); return result.merge(AttributeMap.builder().put(SdkHttpConfigurationOption.PROTOCOL, Protocol.HTTP2) - .put(SdkHttpConfigurationOption.PROTOCOL_NEGOTIATION, ProtocolNegotiation.ALPN).build()); + .put(SdkHttpConfigurationOption.PROTOCOL_NEGOTIATION, ProtocolNegotiation.ALPN).build()); } @Override @@ -167,6 +169,6 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java index a296652d9b41..a9dcec96847e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-h2-usePriorKnowledgeForH2-service-client-builder-class.java @@ -53,9 +53,11 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -65,7 +67,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new H2RequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/h2/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -81,21 +83,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - 
SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2") - .serviceProfileProperty("h2_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_H2_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlH2") + .serviceProfileProperty("h2_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -165,6 +167,6 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 
'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java index 4c7699ac4c6d..5ec8c0facf9a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-ops-client-builder-class.java @@ -6,7 +6,10 @@ import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.auth.credentials.TokenUtils; import software.amazon.awssdk.auth.signer.Aws4Signer; +import software.amazon.awssdk.auth.token.credentials.aws.DefaultAwsTokenProvider; +import software.amazon.awssdk.auth.token.signer.aws.BearerTokenSigner; import software.amazon.awssdk.awscore.client.builder.AwsDefaultClientBuilder; import software.amazon.awssdk.awscore.client.config.AwsClientOption; import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; @@ -22,6 +25,7 @@ import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.identity.spi.IdentityProvider; import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.protocols.json.internal.unmarshall.SdkClientJsonProtocolAdvancedOption; import software.amazon.awssdk.regions.ServiceMetadataAdvancedOption; import software.amazon.awssdk.retries.api.RetryStrategy; @@ -38,7 +42,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder, C> extends - 
AwsDefaultClientBuilder { + AwsDefaultClientBuilder { @Override protected final String serviceEndpointPrefix() { return "database-service-endpoint"; @@ -51,9 +55,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override @@ -63,7 +73,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List additionalInterceptors = new ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -71,6 +81,10 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon SdkClientConfiguration.Builder builder = config.toBuilder(); builder.lazyOption(SdkClientOption.IDENTITY_PROVIDERS, c -> { IdentityProviders.Builder 
result = IdentityProviders.builder(); + IdentityProvider tokenIdentityProvider = c.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + if (tokenIdentityProvider != null) { + result.putIdentityProvider(tokenIdentityProvider); + } IdentityProvider credentialsIdentityProvider = c.get(AwsClientOption.CREDENTIALS_IDENTITY_PROVIDER); if (credentialsIdentityProvider != null) { result.putIdentityProvider(credentialsIdentityProvider); @@ -79,21 +93,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + 
.putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -111,6 +125,14 @@ private DatabaseEndpointProvider defaultEndpointProvider() { return DatabaseEndpointProvider.defaultProvider(); } + private IdentityProvider defaultTokenProvider() { + return DefaultAwsTokenProvider.create(); + } + + private Signer defaultTokenSigner() { + return BearerTokenSigner.create(); + } + @Override protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List internalPlugins = internalPlugins(config); @@ -121,7 +143,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -158,6 +180,10 @@ private List internalPlugins(SdkClientConfiguration config) { protected static void validateClientOptions(SdkClientConfiguration c) { Validate.notNull(c.option(SdkAdvancedClientOption.SIGNER), - "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + "The 'overrideConfiguration.advancedOption[SIGNER]' must be configured in the client builder."); + Validate.notNull(c.option(SdkAdvancedClientOption.TOKEN_SIGNER), + "The 'overrideConfiguration.advancedOption[TOKEN_SIGNER]' must be configured in the client builder."); + 
Validate.notNull(c.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER), + "The 'tokenProvider' must be configured in the client builder."); } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java index a5aedee94c63..6a8e2290d918 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-no-auth-service-client-builder-class.java @@ -33,7 +33,7 @@ @Generated("software.amazon.awssdk:codegen") @SdkInternalApi abstract class DefaultDatabaseBaseClientBuilder, C> extends - AwsDefaultClientBuilder { + AwsDefaultClientBuilder { @Override protected final String serviceEndpointPrefix() { return "database-service-endpoint"; @@ -46,8 +46,10 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()).option( - SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false)); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()).option( + SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false); + }); } @Override @@ -57,7 +59,7 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon endpointInterceptors.add(new DatabaseRequestSetEndpointInterceptor()); ClasspathInterceptorChainFactory interceptorFactory = new ClasspathInterceptorChainFactory(); List interceptors = interceptorFactory - .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); + .getInterceptors("software/amazon/awssdk/services/database/execution.interceptors"); List additionalInterceptors = new 
ArrayList<>(); interceptors = CollectionUtils.mergeLists(endpointInterceptors, interceptors); interceptors = CollectionUtils.mergeLists(interceptors, additionalInterceptors); @@ -69,21 +71,21 @@ protected final SdkClientConfiguration finalizeServiceConfiguration(SdkClientCon }); builder.option(SdkClientOption.EXECUTION_INTERCEPTORS, interceptors); builder.lazyOptionIfAbsent( - SdkClientOption.CLIENT_ENDPOINT_PROVIDER, - c -> AwsClientEndpointProvider - .builder() - .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") - .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") - .serviceProfileProperty("database_service") - .serviceEndpointPrefix(serviceEndpointPrefix()) - .defaultProtocol("https") - .region(c.get(AwsClientOption.AWS_REGION)) - .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) - .profileName(c.get(SdkClientOption.PROFILE_NAME)) - .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, - c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) - .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) - .fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); + SdkClientOption.CLIENT_ENDPOINT_PROVIDER, + c -> AwsClientEndpointProvider + .builder() + .serviceEndpointOverrideEnvironmentVariable("AWS_ENDPOINT_URL_DATABASE_SERVICE") + .serviceEndpointOverrideSystemProperty("aws.endpointUrlDatabase") + .serviceProfileProperty("database_service") + .serviceEndpointPrefix(serviceEndpointPrefix()) + .defaultProtocol("https") + .region(c.get(AwsClientOption.AWS_REGION)) + .profileFile(c.get(SdkClientOption.PROFILE_FILE_SUPPLIER)) + .profileName(c.get(SdkClientOption.PROFILE_NAME)) + .putAdvancedOption(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT, + c.get(ServiceMetadataAdvancedOption.DEFAULT_S3_US_EAST_1_REGIONAL_ENDPOINT)) + .dualstackEnabled(c.get(AwsClientOption.DUALSTACK_ENDPOINT_ENABLED)) + 
.fipsEnabled(c.get(AwsClientOption.FIPS_ENDPOINT_ENABLED)).build()); builder.option(SdkClientJsonProtocolAdvancedOption.ENABLE_FAST_UNMARSHALLER, true); return builder.build(); } @@ -107,7 +109,7 @@ protected SdkClientConfiguration invokePlugins(SdkClientConfiguration config) { List plugins = CollectionUtils.mergeLists(internalPlugins, externalPlugins); SdkClientConfiguration.Builder configuration = config.toBuilder(); DatabaseServiceClientConfigurationBuilder serviceConfigBuilder = new DatabaseServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java index df04abcb8bef..2ef919b3671f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/builder/test-query-client-builder-class.java @@ -60,14 +60,15 @@ protected final String serviceName() { @Override protected final SdkClientConfiguration mergeServiceDefaults(SdkClientConfiguration config) { - return config.merge(c -> c - .option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) - .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) - .option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) - .lazyOption(AwsClientOption.TOKEN_PROVIDER, - p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) - .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) - .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner())); + return config.merge(c -> { + c.option(SdkClientOption.ENDPOINT_PROVIDER, defaultEndpointProvider()) + .option(SdkAdvancedClientOption.SIGNER, defaultSigner()) + 
.option(SdkClientOption.CRC32_FROM_COMPRESSED_DATA_ENABLED, false) + .lazyOption(AwsClientOption.TOKEN_PROVIDER, + p -> TokenUtils.toSdkTokenProvider(p.get(AwsClientOption.TOKEN_IDENTITY_PROVIDER))) + .option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, defaultTokenProvider()) + .option(SdkAdvancedClientOption.TOKEN_SIGNER, defaultTokenSigner()); + }); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config new file mode 100644 index 000000000000..2edb12c857bc --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/json-bearer-auth/customization-env-bearer-token.config @@ -0,0 +1,3 @@ +{ + "enableEnvironmentBearerToken": true +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization-uri-cache.config b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization-uri-cache.config new file mode 100644 index 000000000000..75393cba13cf --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/customization-uri-cache.config @@ -0,0 +1,37 @@ +{ + "authPolicyActions" : { + "skip" : true + }, + "skipEndpointTests": { + "test case 4": "Does not work" + }, + "endpointParameters": { + "CustomEndpointArray": { + "required": false, + "documentation": "Parameter from the customization config", + "type": "StringArray" + }, + "ArnList": { + "required": false, + "documentation": "Parameter from the customization config", + "type": "StringArray" + } + }, + "customOperationContextParams": [ + { + "operationName": "OperationWithCustomizedOperationContextParam", + "operationContextParamsMap": { + "customEndpointArray": { + "path": "ListMember.StringList[*].LeafString" + } + } + } + ], + 
"preClientExecutionRequestCustomizer": { + "OperationWithCustomMember": { + "methodName": "dummyRequestModifier", + "className": "software.amazon.awssdk.codegen.internal.UtilsTest" + } + }, + "enableEndpointProviderUriCaching": true +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/endpoint-rule-set-unknown-properties.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/endpoint-rule-set-unknown-properties.json new file mode 100644 index 000000000000..ddc397230298 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/query/endpoint-rule-set-unknown-properties.json @@ -0,0 +1,46 @@ +{ + "version": "1.0", + "parameters": { + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "unknownProperty": "value" + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Endpoint", + "type": "error" + } + ] +} \ No newline at end of file diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json index 7120326e77ec..d9c2b68b4b6e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/c2j/rpcv2/service-2.json @@ -4,8 +4,7 @@ "apiVersion":"2023-03-10", "auth":["aws.auth#sigv4"], "endpointPrefix":"smithyrpcv2protocol", - "protocol":"smithy-rpc-v2-cbor", - 
"protocols":["smithy-rpc-v2-cbor"], + "protocols":["smithy-rpc-v2-cbor", "json", "query"], "serviceFullName":"RpcV2 Protocol Service", "serviceId":"SmithyRpcV2Protocol", "signatureVersion":"v4", diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/specs/test-service-version-info-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/specs/test-service-version-info-class.java new file mode 100644 index 000000000000..ab582d908e73 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/specs/test-service-version-info-class.java @@ -0,0 +1,17 @@ +package software.amazon.awssdk.services.json.internal; + +import java.lang.String; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class ServiceVersionInfo { + /** + * Returns the current version for the AWS SDK in which this class is running. 
+ */ + public static final String VERSION = "{{VERSION}}"; + + private ServiceVersionInfo() { + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java index 7298068aa6ad..62fc9a6420b1 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-aws-json-async-client-class.java @@ -58,6 +58,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -140,7 +141,8 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { protected DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); } @@ -189,9 +191,23 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = 
protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -257,9 +273,23 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture 
executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -338,8 +368,23 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -420,9 +465,23 @@ public CompletableFuture eventStreamO HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -510,8 +569,23 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . 
builder() @@ -587,9 +661,23 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -647,9 +735,23 @@ public CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, 
exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -710,9 +812,23 @@ public CompletableFuture operationWithNoneAut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithNoneAuthTypeResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -770,9 +886,23 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler 
= createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -835,9 +965,23 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -895,9 +1039,23 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + 
.exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -959,9 +1117,23 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1037,9 +1209,23 @@ public CompletableFuture streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": 
+ return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1053,8 +1239,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1123,9 +1309,23 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); 
+ default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1133,7 +1333,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1166,17 +1367,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInputException") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ServiceFaultException") - .exceptionBuilderSupplier(ServiceFaultException::builder).httpStatusCode(500).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1231,11 +1423,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest 
request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java index 8953d6a681c1..55e919112380 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-cbor-async-client-class.java @@ -59,6 +59,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -143,7 +144,9 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { protected DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = 
clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, + "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsCborProtocolFactory.builder()).build(); this.jsonProtocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); @@ -193,9 +196,23 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -261,9 +278,23 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -342,8 +373,23 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -424,9 +470,23 @@ public CompletableFuture eventStreamO HttpResponseHandler 
responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -514,8 +574,23 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; 
HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . builder() @@ -591,9 +666,23 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -651,9 +740,23 @@ public CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -714,9 +817,23 @@ public CompletableFuture operationWithNoneAut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithNoneAuthTypeResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -774,9 +891,23 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -839,9 +970,23 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -899,9 +1044,23 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -963,9 +1122,23 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1041,9 +1214,23 @@ public CompletableFuture streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode 
== null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1057,8 +1244,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1127,9 +1314,23 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1137,7 +1338,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1170,17 +1372,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInputException") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ServiceFaultException") - 
.exceptionBuilderSupplier(ServiceFaultException::builder).httpStatusCode(500).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1235,11 +1428,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java index 4a99f346bc6c..6af0c4bce214 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-async-client-class.java @@ -64,6 +64,7 @@ import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.batchmanager.JsonAsyncBatchManager; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import 
software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -153,7 +154,9 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { protected DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, + "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); @@ -203,9 +206,20 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -270,9 +284,20 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); 
- + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -329,9 +354,20 @@ public CompletableFuture bearerAuthOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, BearerAuthOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -416,8 +452,20 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + 
default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -501,9 +549,20 @@ public CompletableFuture eventStreamO HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -596,8 +655,20 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . builder() @@ -675,9 +746,20 @@ public CompletableFuture getOperationWithCheck HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetOperationWithChecksumResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -746,9 +828,20 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); 
CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -806,9 +899,20 @@ public CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -870,9 +974,20 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -935,9 +1050,20 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if 
(errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -995,9 +1121,20 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1077,9 +1214,20 @@ public CompletableFuture putOperationWithChecksum( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PutOperationWithChecksumResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler 
= createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1104,7 +1252,8 @@ public CompletableFuture putOperationWithChecksum( .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, DefaultChecksumAlgorithm.SHA256).build()) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(putOperationWithChecksumRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1168,9 +1317,20 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1246,9 +1406,20 @@ public CompletableFuture streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case 
"InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1262,8 +1433,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1332,9 +1503,20 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); 
CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1342,7 +1524,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1380,14 +1563,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1442,11 +1619,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata 
operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java index 2ac6c6ae6d23..2af4fa91155d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-json-client-class.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -41,6 +43,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -96,7 +99,7 @@ final class DefaultJsonClient implements JsonClient { private static final Logger log = Logger.loggerFor(DefaultJsonClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final SyncClientHandler clientHandler; @@ -106,7 +109,8 @@ final class DefaultJsonClient implements JsonClient { 
protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -132,34 +136,45 @@ protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); - + APostOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); 
+ .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -187,33 +202,44 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + 
APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); - + operationMetadata, APostOperationWithOutputResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -237,31 +263,42 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, JsonException { + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, BearerAuthOperationResponse::builder); - + operationMetadata, BearerAuthOperationResponse::builder); + Function> 
exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + 
.withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -285,41 +322,52 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - JsonException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetOperationWithChecksumResponse::builder); - + operationMetadata, GetOperationWithChecksumResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -347,33 +395,44 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ 
@Override public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); - + operationMetadata, GetWithoutRequiredMembersResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -397,38 +456,49 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, 
OperationWithChecksumRequiredResponse::builder); - + operationMetadata, OperationWithChecksumRequiredResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -452,38 +522,49 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, 
JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); - + operationMetadata, OperationWithRequestCompressionResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -507,33 +588,44 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + 
PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -557,33 +649,44 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler 
responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -633,50 +736,62 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); 
HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PutOperationWithChecksumResponse::builder); - + operationMetadata, PutOperationWithChecksumResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, 
DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -690,11 +805,11 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
          * {@code RequestBody.fromFile(new File("myfile.txt"))}
          * 
    - * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @return Result of the StreamingInputOperation operation returned by the service. @@ -711,39 +826,50 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); - + operationMetadata, StreamingInputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -757,11 +883,11 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
          * {@code RequestBody.fromFile(new File("myfile.txt"))}
          * 
    - * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer @@ -785,43 +911,55 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); - + operationMetadata, StreamingInputOutputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - 
streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -852,33 +990,44 @@ public ReturnT streamingInputOutputOperation( */ 
@Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); - + operationMetadata, StreamingOutputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -898,7 +1047,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -913,8 +1062,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return 
protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -955,14 +1104,8 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java index 5c97f4ae619c..888fc0332acc 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-async-client-class.java @@ -45,6 +45,7 @@ import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.query.internal.QueryServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.query.internal.ServiceVersionInfo; import software.amazon.awssdk.services.query.model.APostOperationRequest; import software.amazon.awssdk.services.query.model.APostOperationResponse; import 
software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; @@ -110,7 +111,7 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultQueryAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final AsyncClientHandler clientHandler; @@ -122,7 +123,8 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Query_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); } @@ -155,27 +157,27 @@ protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -213,29 +215,29 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { 
SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -269,29 +271,29 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -325,37 +327,37 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + 
.requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -390,33 +392,33 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -450,29 +452,29 @@ public CompletableFuture operationWithChe */ 
@Override public CompletableFuture operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) { + OperationWithContextParamRequest operationWithContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + 
.withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -506,30 +508,30 @@ public CompletableFuture operationWithContext */ @Override public CompletableFuture operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomMemberRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomMemberRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -565,30 +567,30 @@ public CompletableFuture operationWithCustomM */ @Override public CompletableFuture operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { + OperationWithCustomizedOperationContextParamRequest 
operationWithCustomizedOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomizedOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new 
OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomizedOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -624,29 +626,29 @@ public CompletableFuture o */ @Override public CompletableFuture operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithMapOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithMapOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -680,29 +682,29 @@ public CompletableFuture operatio */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest 
operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture 
= executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -738,29 +740,29 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithOperationContextParam( - OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - 
.withInput(operationWithOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -795,34 +797,34 @@ public CompletableFuture operationWi */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithRequestCompressionResponse::builder); + .createResponseHandler(OperationWithRequestCompressionResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); @@ -857,29 +859,29 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture operationWithStaticContextParams( - OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { + OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); + operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithStaticContextParamsResponse::builder); + .createResponseHandler(OperationWithStaticContextParamsResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithStaticContextParamsRequest)); + .execute(new ClientExecutionParams() + 
.withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithStaticContextParamsRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -931,56 +933,57 @@ public CompletableFuture operationWith */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(PutOperationWithChecksumResponse::builder); + .createResponseHandler(PutOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + 
.withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -990,7 +993,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return 
CompletableFutureUtils.failedFuture(t); } @@ -1023,33 +1026,33 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new 
ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1088,40 +1091,41 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = 
executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1131,7 +1135,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1154,15 +1158,15 @@ public final String serviceName() { private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java index 5961432e145d..c9fbcb8a1304 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-query-client-class.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.query.internal.QueryServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.query.internal.ServiceVersionInfo; import software.amazon.awssdk.services.query.model.APostOperationRequest; import software.amazon.awssdk.services.query.model.APostOperationResponse; import software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; @@ -103,7 +104,7 @@ final class DefaultQueryClient implements QueryClient { private static final Logger log = Logger.loggerFor(DefaultQueryClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final SyncClientHandler clientHandler; @@ -113,7 +114,8 @@ final class DefaultQueryClient implements QueryClient { protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Query_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); } @@ -139,17 +141,17 
@@ protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryException { + AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -157,11 +159,11 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + 
.hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -189,30 +191,30 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, QueryException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -236,28 +238,28 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, QueryException { + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + 
this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -281,38 +283,38 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - QueryException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler 
responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + 
.withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -336,35 +338,35 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -388,30 +390,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithContextParamResponse operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler 
responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -435,31 +437,31 @@ public 
OperationWithContextParamResponse operationWithContextParam( */ @Override public OperationWithCustomMemberResponse operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, + QueryException { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -484,31 +486,31 @@ public OperationWithCustomMemberResponse operationWithCustomMember( */ @Override public OperationWithCustomizedOperationContextParamResponse operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + 
.createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withInput(operationWithCustomizedOperationContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withInput(operationWithCustomizedOperationContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new 
OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -533,30 +535,30 @@ public OperationWithCustomizedOperationContextParamResponse operationWithCustomi */ @Override public OperationWithMapOperationContextParamResponse operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -580,30 +582,30 @@ public OperationWithMapOperationContextParamResponse operationWithMapOperationCo */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); 
HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -628,30 +630,30 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithOperationContextParamResponse operationWithOperationContextParam( - 
OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -675,35 +677,35 @@ public OperationWithOperationContextParamResponse operationWithOperationContextP */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithRequestCompressionResponse::builder); + 
.createResponseHandler(OperationWithRequestCompressionResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, 
+ RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -727,30 +729,30 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public OperationWithStaticContextParamsResponse operationWithStaticContextParams( - OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithStaticContextParamsResponse::builder); + .createResponseHandler(OperationWithStaticContextParamsResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); + operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -800,47 +802,48 @@ public OperationWithStaticContextParamsResponse operationWithStaticContextParams */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(PutOperationWithChecksumResponse::builder); + .createResponseHandler(PutOperationWithChecksumResponse::builder); 
HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + 
.withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -875,36 +878,36 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); 
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -935,30 +938,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, QueryException { + ResponseTransformer responseTransformer) throws AwsServiceException, + 
SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -983,7 +986,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1036,11 +1039,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java index c082dd8f8ab0..cd9f7feed73e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-async-client-class.java @@ -50,6 +50,7 @@ import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; +import 
software.amazon.awssdk.services.xml.internal.ServiceVersionInfo; import software.amazon.awssdk.services.xml.internal.XmlServiceClientConfigurationBuilder; import software.amazon.awssdk.services.xml.model.APostOperationRequest; import software.amazon.awssdk.services.xml.model.APostOperationResponse; @@ -102,7 +103,7 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultXmlAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final AsyncClientHandler clientHandler; @@ -114,7 +115,8 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient { protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Xml_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); } @@ -147,26 +149,26 @@ protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) - .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -205,28 +207,28 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { 
SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -261,28 +263,28 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new 
BearerAuthOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -317,51 +319,51 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - EventStreamOperationResponseHandler asyncResponseHandler) { + EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) - .build()); + EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) + .build()); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - EventStreamTaggedUnionPojoSupplier.builder() - .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) - .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) - .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata - .builder().hasStreamingSuccessResponse(false).build()); + EventStreamTaggedUnionPojoSupplier.builder() + .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) + .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) + .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata + .builder().hasStreamingSuccessResponse(false).build()); CompletableFuture eventStreamTransformFuture = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . 
builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) - .serviceName(serviceName()).build(); + . builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) + .serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - restAsyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + restAsyncResponseTransformer); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = 
executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(e)); + () -> asyncResponseHandler.exceptionOccurred(e)); eventStreamTransformFuture.completeExceptionally(e); } metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -370,7 +372,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(eventStreamTransformFuture, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -398,35 +400,35 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + 
.requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -462,31 +464,31 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -521,28 +523,28 @@ public CompletableFuture operationWithChe */ @Override public 
CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + 
.withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -578,32 +580,32 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); @@ -656,56 +658,57 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - 
.withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture 
whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -715,7 +718,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -748,32 +751,32 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -813,40 +816,41 @@ public CompletableFuture streamingInputOperatio */ 
@Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - 
.withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -856,7 +860,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -874,15 +878,15 @@ public final String serviceName() { private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java index 457d5a36c2c3..b96f7fc24865 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/sra/test-xml-client-class.java @@ -39,6 +39,7 @@ import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.xml.internal.ServiceVersionInfo; import software.amazon.awssdk.services.xml.internal.XmlServiceClientConfigurationBuilder; import software.amazon.awssdk.services.xml.model.APostOperationRequest; import software.amazon.awssdk.services.xml.model.APostOperationResponse; @@ -85,7 +86,7 @@ final class DefaultXmlClient implements XmlClient { private static final Logger log = Logger.loggerFor(DefaultXmlClient.class); private static final 
AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final SyncClientHandler clientHandler; @@ -95,7 +96,8 @@ final class DefaultXmlClient implements XmlClient { protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Xml_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); } @@ -121,15 +123,15 @@ protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, XmlException { + AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory.createCombinedResponseHandler( - APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -137,10 +139,10 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -168,28 +170,28 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, XmlException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + 
.createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -213,27 +215,27 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, 
XmlException { + throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ 
-257,36 +259,36 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - XmlException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -310,33 +312,33 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - 
OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -360,28 +362,28 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - XmlException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - 
.createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -405,33 +407,33 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public 
OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -481,47 +483,48 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, 
SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - 
.delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -556,34 +559,34 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new 
XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -614,30 +617,30 @@ public StreamingInputOperationResponse 
streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, XmlException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -649,7 +652,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -702,11 +705,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java index 9384c1c5d766..9622a7daa009 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-json-async-client-class.java @@ -63,6 +63,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -146,7 +147,8 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { protected DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + 
.option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); } @@ -195,9 +197,23 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -263,9 +279,23 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -345,8 +375,23 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -429,9 +474,23 @@ public CompletableFuture eventStreamO HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + 
return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -519,8 +578,23 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); 
EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . builder() @@ -596,9 +670,23 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -656,9 +744,23 @@ public CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler 
= createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -719,9 +821,23 @@ public CompletableFuture operationWithNoneAut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithNoneAuthTypeResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -780,9 +896,23 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); 
+ default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -845,9 +975,23 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -905,9 +1049,23 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -972,9 +1130,23 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1052,9 +1224,23 @@ public CompletableFuture streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + 
.exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1068,8 +1254,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1138,9 +1324,23 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1148,7 +1348,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1181,17 +1382,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInputException") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ServiceFaultException") - .exceptionBuilderSupplier(ServiceFaultException::builder).httpStatusCode(500).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List 
resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1261,11 +1453,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-async-client-class.java index d06d668892ea..629de25187ad 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-async-client-class.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.querytojsoncompatible.internal.QueryToJsonCompatibleServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.querytojsoncompatible.internal.ServiceVersionInfo; import software.amazon.awssdk.services.querytojsoncompatible.model.APostOperationRequest; import software.amazon.awssdk.services.querytojsoncompatible.model.APostOperationResponse; import software.amazon.awssdk.services.querytojsoncompatible.model.InvalidInputException; @@ -67,7 +68,8 @@ final class DefaultQueryToJsonCompatibleAsyncClient implements QueryToJsonCompat 
protected DefaultQueryToJsonCompatibleAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "QueryToJsonCompatibleService" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -110,9 +112,20 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -147,15 +160,9 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(QueryToJsonCompatibleException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .hasAwsQueryCompatible(true) - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return 
builder.clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(QueryToJsonCompatibleException::builder).protocol(AwsJsonProtocol.AWS_JSON) + .protocolVersion("1.1").hasAwsQueryCompatible(true); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -211,11 +218,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-sync-client-class.java index 78f87a74dde3..4397f32d85b5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-aws-query-compatible-json-sync-client-class.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -32,6 +34,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; 
import software.amazon.awssdk.services.querytojsoncompatible.internal.QueryToJsonCompatibleServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.querytojsoncompatible.internal.ServiceVersionInfo; import software.amazon.awssdk.services.querytojsoncompatible.model.APostOperationRequest; import software.amazon.awssdk.services.querytojsoncompatible.model.APostOperationResponse; import software.amazon.awssdk.services.querytojsoncompatible.model.InvalidInputException; @@ -51,7 +54,7 @@ final class DefaultQueryToJsonCompatibleClient implements QueryToJsonCompatibleC private static final Logger log = Logger.loggerFor(DefaultQueryToJsonCompatibleClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.AWS_JSON).build(); + .serviceProtocol(AwsServiceProtocol.AWS_JSON).build(); private final SyncClientHandler clientHandler; @@ -61,7 +64,8 @@ final class DefaultQueryToJsonCompatibleClient implements QueryToJsonCompatibleC protected DefaultQueryToJsonCompatibleClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "QueryToJsonCompatibleService" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -87,34 +91,45 @@ protected DefaultQueryToJsonCompatibleClient(SdkClientConfiguration clientConfig */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryToJsonCompatibleException { + AwsServiceException, SdkClientException, QueryToJsonCompatibleException { JsonOperationMetadata 
operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); - + APostOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "QueryToJsonCompatibleService"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -126,7 +141,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -141,8 +156,8 @@ private static List 
resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -175,7 +190,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } QueryToJsonCompatibleServiceClientConfigurationBuilder serviceConfigBuilder = new QueryToJsonCompatibleServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -184,15 +199,9 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(QueryToJsonCompatibleException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .hasAwsQueryCompatible(true) - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(QueryToJsonCompatibleException::builder).protocol(AwsJsonProtocol.AWS_JSON) + .protocolVersion("1.1").hasAwsQueryCompatible(true); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-batchmanager-async.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-batchmanager-async.java index 8081d9331cad..66136524d4cc 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-batchmanager-async.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-batchmanager-async.java @@ -40,6 +40,7 @@ import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.batchmanagertest.batchmanager.BatchManagerTestAsyncBatchManager; import software.amazon.awssdk.services.batchmanagertest.internal.BatchManagerTestServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.batchmanagertest.internal.ServiceVersionInfo; import software.amazon.awssdk.services.batchmanagertest.model.BatchManagerTestException; import software.amazon.awssdk.services.batchmanagertest.model.SendRequestRequest; import software.amazon.awssdk.services.batchmanagertest.model.SendRequestResponse; @@ -69,7 +70,8 @@ final class DefaultBatchManagerTestAsyncClient implements BatchManagerTestAsyncC protected DefaultBatchManagerTestAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "BatchManagerTest" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); } @@ -109,9 +111,17 @@ public CompletableFuture sendRequest(SendRequestRequest sen HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, SendRequestResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler 
errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -205,11 +215,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java index 308abd62a314..7a975ac9082e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-async-client-class.java @@ -64,6 +64,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -149,7 +150,8 @@ final class DefaultJsonAsyncClient implements JsonAsyncClient { protected 
DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsCborProtocolFactory.builder()).build(); this.jsonProtocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); @@ -199,9 +201,23 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -267,9 +283,23 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, 
APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -349,8 +379,23 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() 
.putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -433,9 +478,23 @@ public CompletableFuture eventStreamO HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -523,8 +582,23 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case 
"ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . builder() @@ -600,9 +674,23 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -660,9 +748,23 @@ public CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -723,9 +825,23 @@ public CompletableFuture operationWithNoneAut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithNoneAuthTypeResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -784,9 +900,23 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + 
return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -849,9 +979,23 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -909,9 +1053,23 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, 
PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -976,9 +1134,23 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1056,9 +1228,23 @@ public CompletableFuture 
streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1072,8 +1258,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1142,9 +1328,23 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, 
StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1152,7 +1352,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1185,17 +1386,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInputException") - 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ServiceFaultException") - .exceptionBuilderSupplier(ServiceFaultException::builder).httpStatusCode(500).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1265,11 +1457,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java index b6b072aad637..6894514a3d92 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-cbor-client-class.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -38,6 +40,7 
@@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -98,7 +101,8 @@ final class DefaultJsonClient implements JsonClient { protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsCborProtocolFactory.builder()).build(); } @@ -130,9 +134,23 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest .overrideConfiguration().orElse(null)); @@ -187,9 +205,23 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest @@ -241,9 +273,23 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest @@ -291,9 +337,23 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, this.clientConfiguration); List metricPublishers = 
resolveMetricPublishers(clientConfiguration, @@ -346,9 +406,23 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithNoneAuthTypeResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest @@ -396,9 +470,23 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + 
.exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -451,9 +539,23 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -501,9 +603,23 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + 
if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -561,9 +677,23 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = 
updateSdkClientConfiguration(streamingInputOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest @@ -637,9 +767,23 @@ public ReturnT streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOutputOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -659,6 +803,7 @@ public ReturnT streamingInputOutputOperation( .withRequestConfiguration(clientConfiguration) .withInput(streamingInputOutputOperationRequest) .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) .withRequestBody(requestBody) .withMarshaller( StreamingRequestMarshaller @@ -703,9 +848,23 @@ public ReturnT streamingOutputOperation(StreamingOutputOperationReques HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) 
{ + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInputException": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInputException").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + case "ServiceFaultException": + return Optional.of(ExceptionMetadata.builder().errorCode("ServiceFaultException").httpStatusCode(500) + .exceptionBuilderSupplier(ServiceFaultException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest @@ -721,7 +880,7 @@ public ReturnT streamingOutputOperation(StreamingOutputOperationReques .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -757,8 +916,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return 
protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -799,17 +958,8 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.AWS_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInputException") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ServiceFaultException") - .exceptionBuilderSupplier(ServiceFaultException::builder).httpStatusCode(500).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.AWS_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-async.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-async.java index d3fa60fddd28..cbd3e0ac3630 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-async.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-async.java @@ -3,6 +3,7 @@ import static software.amazon.awssdk.utils.FunctionalUtils.runAndLogError; import foo.bar.helloworld.internal.ProtocolRestJsonWithCustomPackageServiceClientConfigurationBuilder; +import foo.bar.helloworld.internal.ServiceVersionInfo; import foo.bar.helloworld.model.OneOperationRequest; import foo.bar.helloworld.model.OneOperationResponse; import foo.bar.helloworld.model.ProtocolRestJsonWithCustomPackageException; @@ -65,7 +66,8 @@ final class 
DefaultProtocolRestJsonWithCustomPackageAsyncClient implements Proto protected DefaultProtocolRestJsonWithCustomPackageAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "AmazonProtocolRestJsonWithCustomPackage" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -104,9 +106,17 @@ public CompletableFuture oneOperation(OneOperationRequest HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, OneOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -196,11 +206,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-sync.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-sync.java index de640533ca5a..86d2c5372f6b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-sync.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-custompackage-sync.java @@ -1,13 +1,16 @@ package foo.bar.helloworld; import foo.bar.helloworld.internal.ProtocolRestJsonWithCustomPackageServiceClientConfigurationBuilder; +import foo.bar.helloworld.internal.ServiceVersionInfo; import foo.bar.helloworld.model.OneOperationRequest; import foo.bar.helloworld.model.OneOperationResponse; import foo.bar.helloworld.model.ProtocolRestJsonWithCustomPackageException; import foo.bar.helloworld.transform.OneOperationRequestMarshaller; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -30,6 +33,7 @@ import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; @@ -48,7 +52,7 @@ final class DefaultProtocolRestJsonWithCustomPackageClient implements ProtocolRe private static final Logger log = Logger.loggerFor(DefaultProtocolRestJsonWithCustomPackageClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - 
.serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final SyncClientHandler clientHandler; @@ -58,7 +62,8 @@ final class DefaultProtocolRestJsonWithCustomPackageClient implements ProtocolRe protected DefaultProtocolRestJsonWithCustomPackageClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "AmazonProtocolRestJsonWithCustomPackage" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -80,30 +85,38 @@ protected DefaultProtocolRestJsonWithCustomPackageClient(SdkClientConfiguration */ @Override public OneOperationResponse oneOperation(OneOperationRequest oneOperationRequest) throws AwsServiceException, - SdkClientException, ProtocolRestJsonWithCustomPackageException { + SdkClientException, ProtocolRestJsonWithCustomPackageException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - OneOperationResponse::builder); - + OneOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(oneOperationRequest, this.clientConfiguration); 
List metricPublishers = resolveMetricPublishers(clientConfiguration, oneOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AmazonProtocolRestJsonWithCustomPackage"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OneOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("OneOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(oneOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OneOperationRequestMarshaller(protocolFactory))); + .withOperationName("OneOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(oneOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OneOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -115,7 +128,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -130,8 +143,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - 
JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -164,7 +177,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } ProtocolRestJsonWithCustomPackageServiceClientConfigurationBuilder serviceConfigBuilder = new ProtocolRestJsonWithCustomPackageServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -174,14 +187,14 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private > T init(T builder) { return builder.clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(ProtocolRestJsonWithCustomPackageException::builder) - .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); + .defaultServiceExceptionSupplier(ProtocolRestJsonWithCustomPackageException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override public final ProtocolRestJsonWithCustomPackageServiceClientConfiguration serviceClientConfiguration() { return new ProtocolRestJsonWithCustomPackageServiceClientConfigurationBuilder(this.clientConfiguration.toBuilder()) - .build(); + .build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-async.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-async.java index 7804faabf8e8..b92034ef589c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-async.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-async.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.internal.ProtocolRestJsonWithCustomContentTypeServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.internal.ServiceVersionInfo; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.OneOperationRequest; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.OneOperationResponse; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.ProtocolRestJsonWithCustomContentTypeException; @@ -65,7 +66,8 @@ final class DefaultProtocolRestJsonWithCustomContentTypeAsyncClient implements P protected DefaultProtocolRestJsonWithCustomContentTypeAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "AmazonProtocolRestJsonWithCustomContentType" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -104,9 +106,17 @@ public CompletableFuture oneOperation(OneOperationRequest HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, OneOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -196,11 +206,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-sync.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-sync.java index 9f134914f49f..60728a004178 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-sync.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-customservicemetadata-sync.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -25,12 +27,14 @@ import software.amazon.awssdk.metrics.MetricCollector; import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; 
import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.internal.ProtocolRestJsonWithCustomContentTypeServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.internal.ServiceVersionInfo; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.OneOperationRequest; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.OneOperationResponse; import software.amazon.awssdk.services.protocolrestjsonwithcustomcontenttype.model.ProtocolRestJsonWithCustomContentTypeException; @@ -48,7 +52,7 @@ final class DefaultProtocolRestJsonWithCustomContentTypeClient implements Protoc private static final Logger log = Logger.loggerFor(DefaultProtocolRestJsonWithCustomContentTypeClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final SyncClientHandler clientHandler; @@ -58,7 +62,11 @@ final class DefaultProtocolRestJsonWithCustomContentTypeClient implements Protoc protected DefaultProtocolRestJsonWithCustomContentTypeClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration + .toBuilder() + .option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, + "AmazonProtocolRestJsonWithCustomContentType" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = 
init(AwsJsonProtocolFactory.builder()).build(); } @@ -80,30 +88,38 @@ protected DefaultProtocolRestJsonWithCustomContentTypeClient(SdkClientConfigurat */ @Override public OneOperationResponse oneOperation(OneOperationRequest oneOperationRequest) throws AwsServiceException, - SdkClientException, ProtocolRestJsonWithCustomContentTypeException { + SdkClientException, ProtocolRestJsonWithCustomContentTypeException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - OneOperationResponse::builder); - + OneOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(oneOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, oneOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "AmazonProtocolRestJsonWithCustomContentType"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OneOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("OneOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(oneOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OneOperationRequestMarshaller(protocolFactory))); + .withOperationName("OneOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(oneOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OneOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -115,7 +131,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -130,8 +146,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return 
protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -164,7 +180,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } ProtocolRestJsonWithCustomContentTypeServiceClientConfigurationBuilder serviceConfigBuilder = new ProtocolRestJsonWithCustomContentTypeServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -174,14 +190,14 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private > T init(T builder) { return builder.clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(ProtocolRestJsonWithCustomContentTypeException::builder) - .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1").contentType("application/json"); + .defaultServiceExceptionSupplier(ProtocolRestJsonWithCustomContentTypeException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1").contentType("application/json"); } @Override public final ProtocolRestJsonWithCustomContentTypeServiceClientConfiguration serviceClientConfiguration() { return new ProtocolRestJsonWithCustomContentTypeServiceClientConfigurationBuilder(this.clientConfiguration.toBuilder()) - .build(); + .build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java index 2857ce0fa9d1..137d34e5f377 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-async.java @@ -44,6 +44,7 @@ import 
software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.endpointdiscoverytest.internal.EndpointDiscoveryTestServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.endpointdiscoverytest.internal.ServiceVersionInfo; import software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest; import software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsResponse; import software.amazon.awssdk.services.endpointdiscoverytest.model.EndpointDiscoveryTestException; @@ -82,7 +83,8 @@ final class DefaultEndpointDiscoveryTestAsyncClient implements EndpointDiscovery protected DefaultEndpointDiscoveryTestAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "AwsEndpointDiscoveryTest" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); if (clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED)) { this.endpointDiscoveryCache = EndpointDiscoveryRefreshCache @@ -124,9 +126,17 @@ public CompletableFuture describeEndpoints(DescribeEn HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, DescribeEndpointsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = 
clientHandler .execute(new ClientExecutionParams() @@ -182,9 +192,17 @@ public CompletableFuture testDiscovery HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, TestDiscoveryIdentifiersRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER) .isEndpointOverridden(); @@ -268,9 +286,17 @@ public CompletableFuture testDiscoveryOptional( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, TestDiscoveryOptionalResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER) .isEndpointOverridden(); @@ -345,9 +371,17 @@ public CompletableFuture testDiscoveryRequired( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, TestDiscoveryRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; 
HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER) .isEndpointOverridden(); @@ -464,11 +498,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java index ba55ae6d349f..a25497e1327b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-endpoint-discovery-sync.java @@ -3,8 +3,10 @@ import java.net.URI; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; @@ -32,12 +34,14 @@ import software.amazon.awssdk.metrics.MetricCollector; 
import software.amazon.awssdk.metrics.MetricPublisher; import software.amazon.awssdk.metrics.NoOpMetricCollector; +import software.amazon.awssdk.protocols.core.ExceptionMetadata; import software.amazon.awssdk.protocols.json.AwsJsonProtocol; import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory; import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.endpointdiscoverytest.internal.EndpointDiscoveryTestServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.endpointdiscoverytest.internal.ServiceVersionInfo; import software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsRequest; import software.amazon.awssdk.services.endpointdiscoverytest.model.DescribeEndpointsResponse; import software.amazon.awssdk.services.endpointdiscoverytest.model.EndpointDiscoveryTestException; @@ -77,7 +81,8 @@ final class DefaultEndpointDiscoveryTestClient implements EndpointDiscoveryTestC protected DefaultEndpointDiscoveryTestClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "AwsEndpointDiscoveryTest" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); if (clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED)) { this.endpointDiscoveryCache = EndpointDiscoveryRefreshCache.create(EndpointDiscoveryTestEndpointDiscoveryCacheLoader @@ -107,9 +112,17 @@ public DescribeEndpointsResponse describeEndpoints(DescribeEndpointsRequest desc HttpResponseHandler responseHandler = 
protocolFactory.createResponseHandler(operationMetadata, DescribeEndpointsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(describeEndpointsRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, describeEndpointsRequest @@ -154,9 +167,17 @@ public TestDiscoveryIdentifiersRequiredResponse testDiscoveryIdentifiersRequired HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, TestDiscoveryIdentifiersRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER).isEndpointOverridden(); if (endpointOverridden) { @@ -222,9 +243,17 @@ public TestDiscoveryOptionalResponse testDiscoveryOptional(TestDiscoveryOptional HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, TestDiscoveryOptionalResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER).isEndpointOverridden(); URI cachedEndpoint = null; @@ -281,9 +310,17 @@ public TestDiscoveryRequiredResponse testDiscoveryRequired(TestDiscoveryRequired HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, TestDiscoveryRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); boolean endpointDiscoveryEnabled = clientConfiguration.option(SdkClientOption.ENDPOINT_DISCOVERY_ENABLED); boolean endpointOverridden = clientConfiguration.option(SdkClientOption.CLIENT_ENDPOINT_PROVIDER).isEndpointOverridden(); if (endpointOverridden) { @@ -347,8 +384,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java new file mode 100644 index 000000000000..bb604b75807b --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-environment-token-system-settings-class.java @@ -0,0 +1,24 @@ +package software.amazon.awssdk.services.json.internal; + +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.utils.SystemSetting; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public class EnvironmentTokenSystemSettings implements SystemSetting { + @Override + public String property() { + return "aws.bearerTokenJsonService"; + } + + @Override + public String environmentVariable() { + return "AWS_BEARER_TOKEN_JSON_SERVICE"; + } + + @Override + public String defaultValue() { + return null; + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java index 1dfb81af4d68..8cc62fc11b76 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-async-client-class.java @@ -70,6 +70,7 @@ import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.batchmanager.JsonAsyncBatchManager; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -160,7 +161,9 @@ 
final class DefaultJsonAsyncClient implements JsonAsyncClient { protected DefaultJsonAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, + "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); @@ -210,9 +213,20 @@ public CompletableFuture aPostOperation(APostOperationRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", "aPostOperationRequest"); @@ -277,9 +291,20 @@ public CompletableFuture aPostOperationWithOut HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, APostOperationWithOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return 
Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -337,9 +362,20 @@ public CompletableFuture bearerAuthOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, BearerAuthOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -425,8 +461,20 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = 
createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEvent.class, new InputEventMarshaller(protocolFactory)).build(); SdkPublisher eventPublisher = SdkPublisher.adapt(requestStream); @@ -512,9 +560,20 @@ public CompletableFuture eventStreamO HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, EventStreamOperationWithOnlyInputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); EventStreamTaggedUnionJsonMarshaller eventMarshaller = EventStreamTaggedUnionJsonMarshaller.builder() .putMarshaller(DefaultInputEventOne.class, new InputEventMarshaller(protocolFactory)) .putMarshaller(DefaultInputEventTwo.class, new InputEventTwoMarshaller(protocolFactory)).build(); @@ -607,8 +666,20 @@ public CompletableFuture eventStreamOperationWithOnlyOutput( HttpResponseHandler errorEventResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata, eventstreamExceptionMetadataMapper); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; 
HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture future = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer . builder() @@ -686,9 +757,20 @@ public CompletableFuture getOperationWithCheck HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetOperationWithChecksumResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -757,9 +839,20 @@ public CompletableFuture getWithoutRequiredMe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetWithoutRequiredMembersResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -817,9 +910,20 @@ public 
CompletableFuture operationWithChe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithChecksumRequiredResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -881,9 +985,20 @@ public CompletableFuture operationWithR HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithRequestCompressionResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -946,9 +1061,20 @@ public CompletableFuture paginatedOpera HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1006,9 +1132,20 @@ public CompletableFuture paginatedOp HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1091,9 +1228,20 @@ public CompletableFuture putOperationWithChecksum( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, PutOperationWithChecksumResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, 
exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1118,7 +1266,8 @@ public CompletableFuture putOperationWithChecksum( .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, DefaultChecksumAlgorithm.SHA256).build()) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(putOperationWithChecksumRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1185,9 +1334,20 @@ public CompletableFuture streamingInputOperatio HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -1265,9 +1425,20 @@ public CompletableFuture streamingInputOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingInputOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler.execute( new ClientExecutionParams() @@ -1281,8 +1452,8 @@ public CompletableFuture streamingInputOutputOperation( .asyncRequestBody(requestBody).transferEncoding(true).build()) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withAsyncRequestBody(requestBody).withInput(streamingInputOutputOperationRequest), - asyncResponseTransformer); + .withAsyncRequestBody(requestBody).withAsyncResponseTransformer(asyncResponseTransformer) + .withInput(streamingInputOutputOperationRequest), asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1351,9 +1522,20 @@ public CompletableFuture streamingOutputOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, StreamingOutputOperationResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = 
clientHandler.execute( new ClientExecutionParams() @@ -1361,7 +1543,8 @@ public CompletableFuture streamingOutputOperation( .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; CompletableFuture whenCompleted = executeFuture.whenComplete((r, e) -> { if (e != null) { @@ -1399,14 +1582,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -1476,11 +1653,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> 
exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java index 8b32410d4bb9..6c3d7b89e49c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-json-client-class.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.auth.signer.Aws4UnsignedPayloadSigner; @@ -45,6 +47,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.json.internal.JsonServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.json.internal.ServiceVersionInfo; import software.amazon.awssdk.services.json.model.APostOperationRequest; import software.amazon.awssdk.services.json.model.APostOperationResponse; import software.amazon.awssdk.services.json.model.APostOperationWithOutputRequest; @@ -101,7 +104,7 @@ final class DefaultJsonClient implements JsonClient { private static final Logger log = Logger.loggerFor(DefaultJsonClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); + .serviceProtocol(AwsServiceProtocol.REST_JSON).build(); private final SyncClientHandler clientHandler; @@ -111,7 +114,8 @@ final class DefaultJsonClient implements JsonClient { protected DefaultJsonClient(SdkClientConfiguration 
clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Json_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -137,34 +141,45 @@ protected DefaultJsonClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, JsonException { + AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - APostOperationResponse::builder); - + APostOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); 
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); String hostPrefix = "{StringMember}-foo."; HostnameValidator.validateHostnameCompliant(aPostOperationRequest.stringMember(), "StringMember", - "aPostOperationRequest"); + "aPostOperationRequest"); String resolvedHostExpression = String.format("%s-foo.", aPostOperationRequest.stringMember()); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -192,33 +207,44 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) 
throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, APostOperationWithOutputResponse::builder); - + operationMetadata, APostOperationWithOutputResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -242,32 +268,43 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, JsonException { + throws AwsServiceException, SdkClientException, JsonException { bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, 
BearerAuthOperationResponse::builder); - + operationMetadata, BearerAuthOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -291,41 +328,52 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - JsonException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, 
GetOperationWithChecksumResponse::builder); - + operationMetadata, GetOperationWithChecksumResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -353,33 +401,44 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public 
GetWithoutRequiredMembersResponse getWithoutRequiredMembers( - GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, JsonException { + GetWithoutRequiredMembersRequest getWithoutRequiredMembersRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GetWithoutRequiredMembersResponse::builder); - + operationMetadata, GetWithoutRequiredMembersResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getWithoutRequiredMembersRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getWithoutRequiredMembersRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetWithoutRequiredMembers"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetWithoutRequiredMembers").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(getWithoutRequiredMembersRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GetWithoutRequiredMembersRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -403,38 +462,49 @@ public GetWithoutRequiredMembersResponse getWithoutRequiredMembers( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, 
OperationWithChecksumRequiredResponse::builder); - + operationMetadata, OperationWithChecksumRequiredResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -458,38 +528,49 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, JsonException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, 
JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithRequestCompressionResponse::builder); - + operationMetadata, OperationWithRequestCompressionResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -513,33 +594,44 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( - PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + 
PaginatedOperationWithResultKeyRequest paginatedOperationWithResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithResultKeyResponse::builder); - + operationMetadata, PaginatedOperationWithResultKeyResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -563,33 +655,44 @@ public PaginatedOperationWithResultKeyResponse paginatedOperationWithResultKey( */ @Override public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResultKey( - PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, - SdkClientException, JsonException { + PaginatedOperationWithoutResultKeyRequest paginatedOperationWithoutResultKeyRequest) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler 
responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); - + operationMetadata, PaginatedOperationWithoutResultKeyResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(paginatedOperationWithoutResultKeyRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); + paginatedOperationWithoutResultKeyRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PaginatedOperationWithoutResultKey"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("PaginatedOperationWithoutResultKey").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(paginatedOperationWithoutResultKeyRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new PaginatedOperationWithoutResultKeyRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -639,50 +742,62 @@ public PaginatedOperationWithoutResultKeyResponse paginatedOperationWithoutResul */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); 
HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, PutOperationWithChecksumResponse::builder); - + operationMetadata, PutOperationWithChecksumResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, 
DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -696,11 +811,11 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
          * {@code RequestBody.fromFile(new File("myfile.txt"))}
          * 
    - * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @return Result of the StreamingInputOperation operation returned by the service. @@ -717,39 +832,50 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOperationResponse::builder); - + operationMetadata, StreamingInputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -763,11 +889,11 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe * The content to send to the service. A {@link RequestBody} can be created using one of several factory * methods for various sources of data. For example, to create a request body from a file you can do the * following. - * + * *
          * {@code RequestBody.fromFile(new File("myfile.txt"))}
          * 
    - * + * * See documentation in {@link RequestBody} for additional details and which sources of data are supported. * The service documentation for the request content is as follows 'This be a stream' * @param responseTransformer @@ -791,45 +917,57 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingInputOutputOperation( - StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + StreamingInputOutputOperationRequest streamingInputOutputOperationRequest, RequestBody requestBody, + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { streamingInputOutputOperationRequest = applySignerOverride(streamingInputOutputOperationRequest, - Aws4UnsignedPayloadSigner.create()); + Aws4UnsignedPayloadSigner.create()); JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingInputOutputOperationResponse::builder); - + operationMetadata, StreamingInputOutputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = 
updateSdkClientConfiguration(streamingInputOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); + streamingInputOutputOperationRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingInputOutputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller - .builder() - .delegateMarshaller( - new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).transferEncoding(true).build()), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingInputOutputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller + .builder() + .delegateMarshaller( + new StreamingInputOutputOperationRequestMarshaller(protocolFactory)) + 
.requestBody(requestBody).transferEncoding(true).build()), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -860,33 +998,44 @@ public ReturnT streamingInputOutputOperation( */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, JsonException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, JsonException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(true) - .isPayloadJson(false).build(); + .isPayloadJson(false).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, StreamingOutputOperationResponse::builder); - + operationMetadata, StreamingOutputOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Json Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -906,8 +1055,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -917,7 +1066,7 @@ public final String 
serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -932,8 +1081,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -974,14 +1123,8 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(JsonException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(JsonException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java index 3878372becba..315cfc92febc 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-async-client-class.java @@ -49,6 +49,7 @@ import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.query.internal.QueryServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.query.internal.ServiceVersionInfo; import software.amazon.awssdk.services.query.model.APostOperationRequest; import software.amazon.awssdk.services.query.model.APostOperationResponse; import software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; @@ -115,7 +116,7 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultQueryAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final AsyncClientHandler clientHandler; @@ -127,7 +128,8 @@ final class DefaultQueryAsyncClient implements QueryAsyncClient { protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Query_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE); } @@ -160,27 +162,27 @@ protected DefaultQueryAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture 
aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + .createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withInput(aPostOperationRequest)); CompletableFuture 
whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -218,29 +220,29 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + 
.withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -274,30 +276,30 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -331,37 +333,37 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + 
GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new 
GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -396,33 +398,33 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -456,29 +458,29 @@ public CompletableFuture operationWithChe */ 
@Override public CompletableFuture operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) { + OperationWithContextParamRequest operationWithContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + 
.withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -512,30 +514,30 @@ public CompletableFuture operationWithContext */ @Override public CompletableFuture operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomMemberRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomMemberRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -571,30 +573,30 @@ public CompletableFuture operationWithCustomM */ @Override public CompletableFuture operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) { + OperationWithCustomizedOperationContextParamRequest 
operationWithCustomizedOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithCustomizedOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new 
OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithCustomizedOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -630,29 +632,29 @@ public CompletableFuture o */ @Override public CompletableFuture operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithMapOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithMapOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -686,30 +688,30 @@ public CompletableFuture operatio */ @Override public CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest 
operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + 
.putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -745,29 +747,29 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithOperationContextParam( - OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithOperationContextParamRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithOperationContextParamRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -802,34 +804,34 @@ public CompletableFuture operationWi */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest 
operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithRequestCompressionResponse::builder); + .createResponseHandler(OperationWithRequestCompressionResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + 
.withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -864,29 +866,29 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture operationWithStaticContextParams( - OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { + OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); + operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams"); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithStaticContextParamsResponse::builder); + .createResponseHandler(OperationWithStaticContextParamsResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(operationWithStaticContextParamsRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withInput(operationWithStaticContextParamsRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -938,19 +940,19 @@ public CompletableFuture operationWith */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { 
+ PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); if (!isSignerOverridden(clientConfiguration)) { @@ -958,39 +960,40 @@ public CompletableFuture putOperationWithChecksum( } HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(PutOperationWithChecksumResponse::builder); + .createResponseHandler(PutOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - 
.withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> 
finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1000,7 +1003,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1033,13 +1036,13 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -1048,21 +1051,21 @@ public CompletableFuture streamingInputOperatio } HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); @@ -1101,40 +1104,41 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -1144,7 +1148,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -1167,15 +1171,15 @@ public final String serviceName() { private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - 
ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1195,8 +1199,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java index 5f013b28da68..89c434672edb 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-query-client-class.java @@ -42,6 
+42,7 @@ import software.amazon.awssdk.protocols.query.AwsQueryProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.query.internal.QueryServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.query.internal.ServiceVersionInfo; import software.amazon.awssdk.services.query.model.APostOperationRequest; import software.amazon.awssdk.services.query.model.APostOperationResponse; import software.amazon.awssdk.services.query.model.APostOperationWithOutputRequest; @@ -107,7 +108,7 @@ final class DefaultQueryClient implements QueryClient { private static final Logger log = Logger.loggerFor(DefaultQueryClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.QUERY).build(); + .serviceProtocol(AwsServiceProtocol.QUERY).build(); private final SyncClientHandler clientHandler; @@ -117,7 +118,8 @@ final class DefaultQueryClient implements QueryClient { protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Query_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); } @@ -143,17 +145,17 @@ protected DefaultQueryClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, QueryException { + AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationResponse::builder); + 
.createResponseHandler(APostOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -161,11 +163,11 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -193,30 +195,30 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ 
@Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, QueryException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(APostOperationWithOutputResponse::builder); + .createResponseHandler(APostOperationWithOutputResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -240,29 +242,29 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, QueryException { + throws AwsServiceException, SdkClientException, QueryException { bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(BearerAuthOperationResponse::builder); + .createResponseHandler(BearerAuthOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration 
clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -286,38 +288,38 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - QueryException { + GetOperationWithChecksumRequest 
getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(GetOperationWithChecksumResponse::builder); + .createResponseHandler(GetOperationWithChecksumResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + 
.withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -341,35 +343,35 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithChecksumRequiredResponse::builder); + .createResponseHandler(OperationWithChecksumRequiredResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -393,30 +395,30 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithContextParamResponse operationWithContextParam( - OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithContextParamRequest operationWithContextParamRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler 
responseHandler = protocolFactory - .createResponseHandler(OperationWithContextParamResponse::builder); + .createResponseHandler(OperationWithContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithContextParamRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -440,31 +442,31 @@ public 
OperationWithContextParamResponse operationWithContextParam( */ @Override public OperationWithCustomMemberResponse operationWithCustomMember( - OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithCustomMemberRequest operationWithCustomMemberRequest) throws AwsServiceException, SdkClientException, + QueryException { operationWithCustomMemberRequest = UtilsTest.dummyRequestModifier(operationWithCustomMemberRequest); HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomMemberResponse::builder); + .createResponseHandler(OperationWithCustomMemberResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithCustomMemberRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithCustomMemberRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomMember"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomMember").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithCustomMemberRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithCustomMemberRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -489,31 +491,31 @@ public OperationWithCustomMemberResponse operationWithCustomMember( */ @Override public OperationWithCustomizedOperationContextParamResponse operationWithCustomizedOperationContextParam( - OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithCustomizedOperationContextParamRequest operationWithCustomizedOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); + 
.createResponseHandler(OperationWithCustomizedOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( - operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); + operationWithCustomizedOperationContextParamRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithCustomizedOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithCustomizedOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithCustomizedOperationContextParam") - .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withInput(operationWithCustomizedOperationContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithCustomizedOperationContextParam") + .withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withInput(operationWithCustomizedOperationContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new 
OperationWithCustomizedOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -538,30 +540,30 @@ public OperationWithCustomizedOperationContextParamResponse operationWithCustomi */ @Override public OperationWithMapOperationContextParamResponse operationWithMapOperationContextParam( - OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) - throws AwsServiceException, SdkClientException, QueryException { + OperationWithMapOperationContextParamRequest operationWithMapOperationContextParamRequest) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); + .createResponseHandler(OperationWithMapOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithMapOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithMapOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithMapOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithMapOperationContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithMapOperationContextParamRequest).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithMapOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -585,31 +587,31 @@ public OperationWithMapOperationContextParamResponse operationWithMapOperationCo */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - QueryException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); + .createResponseHandler(OperationWithNoneAuthTypeResponse::builder); 
HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -634,30 +636,30 @@ public 
OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithOperationContextParamResponse operationWithOperationContextParam( - OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithOperationContextParamRequest operationWithOperationContextParamRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithOperationContextParamResponse::builder); + .createResponseHandler(OperationWithOperationContextParamResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithOperationContextParamRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); + operationWithOperationContextParamRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithOperationContextParam"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithOperationContextParam").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithOperationContextParamRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithOperationContextParamRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -681,35 +683,35 @@ public OperationWithOperationContextParamResponse operationWithOperationContextP */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithRequestCompressionResponse::builder); + 
.createResponseHandler(OperationWithRequestCompressionResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, 
+ RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -733,30 +735,30 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public OperationWithStaticContextParamsResponse operationWithStaticContextParams( - OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException, - SdkClientException, QueryException { + OperationWithStaticContextParamsRequest operationWithStaticContextParamsRequest) throws AwsServiceException, + SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(OperationWithStaticContextParamsResponse::builder); + .createResponseHandler(OperationWithStaticContextParamsResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithStaticContextParamsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); + operationWithStaticContextParamsRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithStaticContextParams"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithStaticContextParams").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithStaticContextParamsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithStaticContextParamsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -806,47 +808,48 @@ public OperationWithStaticContextParamsResponse operationWithStaticContextParams */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(PutOperationWithChecksumResponse::builder); + .createResponseHandler(PutOperationWithChecksumResponse::builder); 
HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + 
.withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -881,36 +884,36 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingInputOperationResponse::builder); + .createResponseHandler(StreamingInputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); 
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -941,30 +944,30 @@ public StreamingInputOperationResponse streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, QueryException { + ResponseTransformer responseTransformer) throws AwsServiceException, + 
SdkClientException, QueryException { HttpResponseHandler responseHandler = protocolFactory - .createResponseHandler(StreamingOutputOperationResponse::builder); + .createResponseHandler(StreamingOutputOperationResponse::builder); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Query Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -989,8 +992,8 @@ private T applySignerOverride(T request, Signer signer) } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -1000,7 +1003,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -1053,11 +1056,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsQueryProtocolFactory init() { return AwsQueryProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(QueryException::builder).build(); } @Override diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-async-client-class.java index f2530d02c873..9ec3cad2ab53 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-async-client-class.java @@ -37,6 +37,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.protocols.rpcv2.SmithyRpcV2CborProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.smithyrpcv2protocol.internal.ServiceVersionInfo; import software.amazon.awssdk.services.smithyrpcv2protocol.internal.SmithyRpcV2ProtocolServiceClientConfigurationBuilder; import software.amazon.awssdk.services.smithyrpcv2protocol.model.ComplexErrorException; import software.amazon.awssdk.services.smithyrpcv2protocol.model.EmptyInputOutputRequest; @@ -104,7 +105,8 @@ final class DefaultSmithyRpcV2ProtocolAsyncClient implements SmithyRpcV2Protocol protected DefaultSmithyRpcV2ProtocolAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "SmithyRpcV2Protocol" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(SmithyRpcV2CborProtocolFactory.builder()).build(); } @@ -144,9 +146,26 @@ public CompletableFuture emptyInputOutput(EmptyInputOu HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, EmptyInputOutputResponse::builder); - + Function> exceptionMetadataMapper = 
errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -201,9 +220,26 @@ public CompletableFuture float16(Float16Request float16Request) HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, Float16Response::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture 
executeFuture = clientHandler .execute(new ClientExecutionParams().withOperationName("Float16") @@ -258,9 +294,26 @@ public CompletableFuture fractionalSeconds(Fractional HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, FractionalSecondsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -318,9 +371,26 @@ public CompletableFuture greetingWithErrors(Greeting HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GreetingWithErrorsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -375,9 +445,26 @@ public CompletableFuture noInputOutput(NoInputOutputReque HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, NoInputOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -435,9 +522,26 @@ public CompletableFuture operationWithDefaults( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OperationWithDefaultsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -494,9 +598,26 @@ public CompletableFuture optionalInputOutput( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OptionalInputOutputResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -552,9 +673,26 @@ public CompletableFuture 
recursiveShapes(RecursiveShape HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, RecursiveShapesResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -611,9 +749,26 @@ public CompletableFuture rpcV2CborDenseMaps(RpcV2Cbo HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, RpcV2CborDenseMapsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return 
Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -669,9 +824,26 @@ public CompletableFuture rpcV2CborLists(RpcV2CborListsRe HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, RpcV2CborListsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -729,9 +901,26 @@ public CompletableFuture rpcV2CborSparseMaps( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, RpcV2CborSparseMapsResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -788,9 +977,26 @@ public CompletableFuture simpleScalarProperties( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, SimpleScalarPropertiesResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -847,9 +1053,26 @@ public CompletableFuture sparseNullsOperation( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, SparseNullsOperationResponse::builder); - + Function> exceptionMetadataMapper = 
errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -880,20 +1103,9 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(SmithyRpcV2ProtocolException::builder) - .protocol(AwsJsonProtocol.SMITHY_RPC_V2_CBOR) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("ValidationException") - .exceptionBuilderSupplier(ValidationException::builder).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidGreeting") - .exceptionBuilderSupplier(InvalidGreetingException::builder).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ComplexError") - .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + return builder.clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(SmithyRpcV2ProtocolException::builder) + .protocol(AwsJsonProtocol.SMITHY_RPC_V2_CBOR).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -949,11 +1161,6 @@ 
private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-sync.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-sync.java index c02466218908..e1127da5917e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-sync.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-rpcv2-sync.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -31,6 +33,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.protocols.rpcv2.SmithyRpcV2CborProtocolFactory; import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.smithyrpcv2protocol.internal.ServiceVersionInfo; import software.amazon.awssdk.services.smithyrpcv2protocol.internal.SmithyRpcV2ProtocolServiceClientConfigurationBuilder; import software.amazon.awssdk.services.smithyrpcv2protocol.model.ComplexErrorException; import 
software.amazon.awssdk.services.smithyrpcv2protocol.model.EmptyInputOutputRequest; @@ -88,7 +91,7 @@ final class DefaultSmithyRpcV2ProtocolClient implements SmithyRpcV2ProtocolClien private static final Logger log = Logger.loggerFor(DefaultSmithyRpcV2ProtocolClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.SMITHY_RPC_V2_CBOR).build(); + .serviceProtocol(AwsServiceProtocol.SMITHY_RPC_V2_CBOR).build(); private final SyncClientHandler clientHandler; @@ -98,7 +101,8 @@ final class DefaultSmithyRpcV2ProtocolClient implements SmithyRpcV2ProtocolClien protected DefaultSmithyRpcV2ProtocolClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "SmithyRpcV2Protocol" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(SmithyRpcV2CborProtocolFactory.builder()).build(); } @@ -120,31 +124,48 @@ protected DefaultSmithyRpcV2ProtocolClient(SdkClientConfiguration clientConfigur */ @Override public EmptyInputOutputResponse emptyInputOutput(EmptyInputOutputRequest emptyInputOutputRequest) throws AwsServiceException, - SdkClientException, SmithyRpcV2ProtocolException { + SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - EmptyInputOutputResponse::builder); - + EmptyInputOutputResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return 
Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(emptyInputOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, emptyInputOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EmptyInputOutput"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("EmptyInputOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(emptyInputOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new EmptyInputOutputRequestMarshaller(protocolFactory))); + .withOperationName("EmptyInputOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(emptyInputOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new EmptyInputOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -168,29 +189,46 @@ public EmptyInputOutputResponse emptyInputOutput(EmptyInputOutputRequest emptyIn */ @Override public Float16Response float16(Float16Request float16Request) throws AwsServiceException, SdkClientException, - SmithyRpcV2ProtocolException { + SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - Float16Response::builder); - + Float16Response::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(float16Request, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, float16Request - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "Float16"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("Float16").withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withInput(float16Request).withMetricCollector(apiCallMetricCollector) - .withMarshaller(new Float16RequestMarshaller(protocolFactory))); + .withOperationName("Float16").withProtocolMetadata(protocolMetadata).withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) + .withInput(float16Request).withMetricCollector(apiCallMetricCollector) + .withMarshaller(new Float16RequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -214,31 +252,48 @@ public Float16Response float16(Float16Request float16Request) throws AwsServiceE */ @Override public FractionalSecondsResponse fractionalSeconds(FractionalSecondsRequest fractionalSecondsRequest) - throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - FractionalSecondsResponse::builder); - + FractionalSecondsResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return 
Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(fractionalSecondsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, fractionalSecondsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "FractionalSeconds"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("FractionalSeconds").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(fractionalSecondsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new FractionalSecondsRequestMarshaller(protocolFactory))); + .withOperationName("FractionalSeconds").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(fractionalSecondsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new FractionalSecondsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -264,32 +319,49 @@ public FractionalSecondsResponse fractionalSeconds(FractionalSecondsRequest frac */ @Override public GreetingWithErrorsResponse greetingWithErrors(GreetingWithErrorsRequest greetingWithErrorsRequest) - throws ComplexErrorException, InvalidGreetingException, AwsServiceException, SdkClientException, - SmithyRpcV2ProtocolException { + throws ComplexErrorException, InvalidGreetingException, AwsServiceException, SdkClientException, + SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, GreetingWithErrorsResponse::builder); - + operationMetadata, 
GreetingWithErrorsResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(greetingWithErrorsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, greetingWithErrorsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GreetingWithErrors"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("GreetingWithErrors").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(greetingWithErrorsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new GreetingWithErrorsRequestMarshaller(protocolFactory))); + .withOperationName("GreetingWithErrors").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(greetingWithErrorsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new GreetingWithErrorsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -313,30 +385,47 @@ public GreetingWithErrorsResponse greetingWithErrors(GreetingWithErrorsRequest g */ @Override public NoInputOutputResponse noInputOutput(NoInputOutputRequest noInputOutputRequest) throws AwsServiceException, - SdkClientException, SmithyRpcV2ProtocolException { + SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - NoInputOutputResponse::builder); - + NoInputOutputResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case 
"ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(noInputOutputRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, noInputOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "NoInputOutput"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("NoInputOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(noInputOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new NoInputOutputRequestMarshaller(protocolFactory))); + .withOperationName("NoInputOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(noInputOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new NoInputOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -361,31 +450,48 @@ public NoInputOutputResponse noInputOutput(NoInputOutputRequest noInputOutputReq */ @Override public OperationWithDefaultsResponse operationWithDefaults(OperationWithDefaultsRequest operationWithDefaultsRequest) - throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OperationWithDefaultsResponse::builder); - + operationMetadata, OperationWithDefaultsResponse::builder); + Function> exceptionMetadataMapper = errorCode 
-> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithDefaultsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithDefaultsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithDefaults"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("OperationWithDefaults").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(operationWithDefaultsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OperationWithDefaultsRequestMarshaller(protocolFactory))); + .withOperationName("OperationWithDefaults").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(operationWithDefaultsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OperationWithDefaultsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -409,31 +515,48 @@ public OperationWithDefaultsResponse operationWithDefaults(OperationWithDefaults */ @Override public OptionalInputOutputResponse optionalInputOutput(OptionalInputOutputRequest optionalInputOutputRequest) - throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, OptionalInputOutputResponse::builder); - + operationMetadata, OptionalInputOutputResponse::builder); + Function> exceptionMetadataMapper = 
errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(optionalInputOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, optionalInputOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OptionalInputOutput"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("OptionalInputOutput").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(optionalInputOutputRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new OptionalInputOutputRequestMarshaller(protocolFactory))); + .withOperationName("OptionalInputOutput").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(optionalInputOutputRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new OptionalInputOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -457,31 +580,48 @@ public OptionalInputOutputResponse optionalInputOutput(OptionalInputOutputReques */ @Override public RecursiveShapesResponse recursiveShapes(RecursiveShapesRequest recursiveShapesRequest) throws AwsServiceException, - SdkClientException, SmithyRpcV2ProtocolException { + SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - RecursiveShapesResponse::builder); - + RecursiveShapesResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) 
{ + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(recursiveShapesRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, recursiveShapesRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "RecursiveShapes"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("RecursiveShapes").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(recursiveShapesRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new RecursiveShapesRequestMarshaller(protocolFactory))); + .withOperationName("RecursiveShapes").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(recursiveShapesRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new RecursiveShapesRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -506,31 +646,48 @@ public RecursiveShapesResponse recursiveShapes(RecursiveShapesRequest recursiveS */ @Override public RpcV2CborDenseMapsResponse rpcV2CborDenseMaps(RpcV2CborDenseMapsRequest rpcV2CborDenseMapsRequest) - throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, RpcV2CborDenseMapsResponse::builder); - + operationMetadata, RpcV2CborDenseMapsResponse::builder); + Function> exceptionMetadataMapper = errorCode -> 
{ + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(rpcV2CborDenseMapsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, rpcV2CborDenseMapsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "RpcV2CborDenseMaps"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("RpcV2CborDenseMaps").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborDenseMapsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new RpcV2CborDenseMapsRequestMarshaller(protocolFactory))); + .withOperationName("RpcV2CborDenseMaps").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborDenseMapsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new RpcV2CborDenseMapsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -555,30 +712,47 @@ public RpcV2CborDenseMapsResponse rpcV2CborDenseMaps(RpcV2CborDenseMapsRequest r */ @Override public RpcV2CborListsResponse rpcV2CborLists(RpcV2CborListsRequest rpcV2CborListsRequest) throws ValidationException, - AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, - RpcV2CborListsResponse::builder); - + RpcV2CborListsResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return 
Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(rpcV2CborListsRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, rpcV2CborListsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "RpcV2CborLists"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("RpcV2CborLists").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborListsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new RpcV2CborListsRequestMarshaller(protocolFactory))); + .withOperationName("RpcV2CborLists").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborListsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new RpcV2CborListsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -603,31 +777,48 @@ public RpcV2CborListsResponse rpcV2CborLists(RpcV2CborListsRequest rpcV2CborList */ @Override public RpcV2CborSparseMapsResponse rpcV2CborSparseMaps(RpcV2CborSparseMapsRequest rpcV2CborSparseMapsRequest) - throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws ValidationException, AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, RpcV2CborSparseMapsResponse::builder); - + operationMetadata, RpcV2CborSparseMapsResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { 
+ if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(rpcV2CborSparseMapsRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, rpcV2CborSparseMapsRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "RpcV2CborSparseMaps"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("RpcV2CborSparseMaps").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborSparseMapsRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new RpcV2CborSparseMapsRequestMarshaller(protocolFactory))); + .withOperationName("RpcV2CborSparseMaps").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(rpcV2CborSparseMapsRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new RpcV2CborSparseMapsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -651,32 +842,49 @@ public RpcV2CborSparseMapsResponse rpcV2CborSparseMaps(RpcV2CborSparseMapsReques */ @Override public SimpleScalarPropertiesResponse simpleScalarProperties(SimpleScalarPropertiesRequest simpleScalarPropertiesRequest) - throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, SimpleScalarPropertiesResponse::builder); - + operationMetadata, SimpleScalarPropertiesResponse::builder); + Function> exceptionMetadataMapper = 
errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(simpleScalarPropertiesRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, simpleScalarPropertiesRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "SimpleScalarProperties"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("SimpleScalarProperties").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(simpleScalarPropertiesRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new SimpleScalarPropertiesRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("SimpleScalarProperties").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(simpleScalarPropertiesRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new SimpleScalarPropertiesRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -700,31 +908,48 @@ public SimpleScalarPropertiesResponse simpleScalarProperties(SimpleScalarPropert */ @Override public SparseNullsOperationResponse sparseNullsOperation(SparseNullsOperationRequest sparseNullsOperationRequest) - throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { + throws AwsServiceException, SdkClientException, SmithyRpcV2ProtocolException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) - .isPayloadJson(true).build(); + .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - operationMetadata, SparseNullsOperationResponse::builder); - + operationMetadata, 
SparseNullsOperationResponse::builder); + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "ValidationException": + return Optional.of(ExceptionMetadata.builder().errorCode("ValidationException") + .exceptionBuilderSupplier(ValidationException::builder).build()); + case "InvalidGreeting": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidGreeting") + .exceptionBuilderSupplier(InvalidGreetingException::builder).build()); + case "ComplexError": + return Optional.of(ExceptionMetadata.builder().errorCode("ComplexError") + .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(sparseNullsOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, sparseNullsOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "SmithyRpcV2Protocol"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "SparseNullsOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("SparseNullsOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(sparseNullsOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new SparseNullsOperationRequestMarshaller(protocolFactory))); + .withOperationName("SparseNullsOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(sparseNullsOperationRequest) + .withMetricCollector(apiCallMetricCollector) + .withMarshaller(new SparseNullsOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -736,7 +961,7 @@ public final String serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -751,8 +976,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return 
protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -785,7 +1010,7 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } SmithyRpcV2ProtocolServiceClientConfigurationBuilder serviceConfigBuilder = new SmithyRpcV2ProtocolServiceClientConfigurationBuilder( - configuration); + configuration); for (SdkPlugin plugin : plugins) { plugin.configureClient(serviceConfigBuilder); } @@ -794,20 +1019,9 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(SmithyRpcV2ProtocolException::builder) - .protocol(AwsJsonProtocol.SMITHY_RPC_V2_CBOR) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("ValidationException") - .exceptionBuilderSupplier(ValidationException::builder).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidGreeting") - .exceptionBuilderSupplier(InvalidGreetingException::builder).build()) - .registerModeledException( - ExceptionMetadata.builder().errorCode("ComplexError") - .exceptionBuilderSupplier(ComplexErrorException::builder).build()); + return builder.clientConfiguration(clientConfiguration) + .defaultServiceExceptionSupplier(SmithyRpcV2ProtocolException::builder) + .protocol(AwsJsonProtocol.SMITHY_RPC_V2_CBOR).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-async-client-class.java index 4a8589dead07..7a1c65058c5a 100644 --- 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-async-client-class.java @@ -43,6 +43,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.database.internal.DatabaseServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.database.internal.ServiceVersionInfo; import software.amazon.awssdk.services.database.model.DatabaseException; import software.amazon.awssdk.services.database.model.DatabaseRequest; import software.amazon.awssdk.services.database.model.DeleteRowRequest; @@ -102,7 +103,8 @@ final class DefaultDatabaseAsyncClient implements DatabaseAsyncClient { protected DefaultDatabaseAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Database_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -145,9 +147,20 @@ public CompletableFuture deleteRow(DeleteRowRequest deleteRow HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, DeleteRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + 
}; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams().withOperationName("DeleteRow") @@ -205,9 +218,20 @@ public CompletableFuture getRow(GetRowRequest getRowRequest) { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, GetRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams().withOperationName("GetRow") @@ -269,9 +293,20 @@ public CompletableFuture opWithSigv HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, OpWithSigv4AndSigv4AUnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -332,9 +367,20 @@ public CompletableFuture 
opWithSigv4SignedPayl HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4SignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -395,9 +441,20 @@ public CompletableFuture opWithSigv4UnSigned HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4UnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -470,9 +527,20 @@ public CompletableFuture opWithS HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, OpWithSigv4UnSignedPayloadAndStreamingResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return 
Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -483,10 +551,10 @@ public CompletableFuture opWithS .builder() .delegateMarshaller( new OpWithSigv4UnSignedPayloadAndStreamingRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).transferEncoding(true).build()).withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler).withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(opWithSigv4UnSignedPayloadAndStreamingRequest)); + .asyncRequestBody(requestBody).transferEncoding(true).build()) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncRequestBody(requestBody).withInput(opWithSigv4UnSignedPayloadAndStreamingRequest)); CompletableFuture whenCompleted = executeFuture .whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -540,9 +608,20 @@ public CompletableFuture opWithSigv4aSignedPa HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return 
Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -603,9 +682,20 @@ public CompletableFuture opWithSigv4aUnSign HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4AUnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -668,9 +758,20 @@ public CompletableFuture opsWithSigv HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, OpsWithSigv4AndSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -729,9 +830,20 @@ public CompletableFuture putRow(PutRowRequest putRowRequest) { 
HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, PutRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams().withOperationName("PutRow") @@ -793,9 +905,20 @@ public CompletableFuture secon HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, SecondOpsWithSigv4AndSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); CompletableFuture executeFuture = clientHandler .execute(new ClientExecutionParams() @@ -827,14 +950,8 @@ public final String serviceName() { } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(DatabaseException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - 
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(DatabaseException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, @@ -905,11 +1022,6 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, return configuration.build(); } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); - } - private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-sync-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-sync-client-class.java index 2fa866188c5f..0f1feebb9f4e 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-sync-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-unsigned-payload-trait-sync-client-class.java @@ -2,7 +2,9 @@ import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Consumer; +import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler; @@ -34,6 +36,7 @@ import software.amazon.awssdk.protocols.json.JsonOperationMetadata; import 
software.amazon.awssdk.retries.api.RetryStrategy; import software.amazon.awssdk.services.database.internal.DatabaseServiceClientConfigurationBuilder; +import software.amazon.awssdk.services.database.internal.ServiceVersionInfo; import software.amazon.awssdk.services.database.model.DatabaseException; import software.amazon.awssdk.services.database.model.DeleteRowRequest; import software.amazon.awssdk.services.database.model.DeleteRowResponse; @@ -92,7 +95,8 @@ final class DefaultDatabaseClient implements DatabaseClient { protected DefaultDatabaseClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Database_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build(); } @@ -124,9 +128,20 @@ public DeleteRowResponse deleteRow(DeleteRowRequest deleteRowRequest) throws Inv HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, DeleteRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(deleteRowRequest, this.clientConfiguration); List metricPublishers = 
resolveMetricPublishers(clientConfiguration, deleteRowRequest .overrideConfiguration().orElse(null)); @@ -174,9 +189,20 @@ public GetRowResponse getRow(GetRowRequest getRowRequest) throws InvalidInputExc HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, GetRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getRowRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getRowRequest .overrideConfiguration().orElse(null)); @@ -224,9 +250,20 @@ public PutRowResponse putRow(PutRowRequest putRowRequest) throws InvalidInputExc HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, PutRowResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putRowRequest, this.clientConfiguration); List metricPublishers = 
resolveMetricPublishers(clientConfiguration, putRowRequest .overrideConfiguration().orElse(null)); @@ -276,9 +313,20 @@ public OpWithSigv4AndSigv4AUnSignedPayloadResponse opWithSigv4AndSigv4aUnSignedP HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4AndSigv4AUnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4AndSigv4AUnSignedPayloadRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -330,9 +378,20 @@ public OpWithSigv4SignedPayloadResponse opWithSigv4SignedPayload( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4SignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4SignedPayloadRequest, this.clientConfiguration); List 
metricPublishers = resolveMetricPublishers(clientConfiguration, opWithSigv4SignedPayloadRequest @@ -384,9 +443,20 @@ public OpWithSigv4UnSignedPayloadResponse opWithSigv4UnSignedPayload( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4UnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4UnSignedPayloadRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, opWithSigv4UnSignedPayloadRequest @@ -450,9 +520,20 @@ public OpWithSigv4UnSignedPayloadAndStreamingResponse opWithSigv4UnSignedPayload HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, OpWithSigv4UnSignedPayloadAndStreamingResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4UnSignedPayloadAndStreamingRequest, 
this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -513,9 +594,20 @@ public OpWithSigv4ASignedPayloadResponse opWithSigv4aSignedPayload( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4ASignedPayloadRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, opWithSigv4ASignedPayloadRequest @@ -567,9 +659,20 @@ public OpWithSigv4AUnSignedPayloadResponse opWithSigv4aUnSignedPayload( HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpWithSigv4AUnSignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opWithSigv4AUnSignedPayloadRequest, this.clientConfiguration); List 
metricPublishers = resolveMetricPublishers(clientConfiguration, opWithSigv4AUnSignedPayloadRequest @@ -622,9 +725,20 @@ public OpsWithSigv4AndSigv4ASignedPayloadResponse opsWithSigv4andSigv4aSignedPay HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, OpsWithSigv4AndSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(opsWithSigv4AndSigv4ASignedPayloadRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -677,9 +791,20 @@ public SecondOpsWithSigv4AndSigv4ASignedPayloadResponse secondOpsWithSigv4andSig HttpResponseHandler responseHandler = protocolFactory .createResponseHandler(operationMetadata, SecondOpsWithSigv4AndSigv4ASignedPayloadResponse::builder); - + Function> exceptionMetadataMapper = errorCode -> { + if (errorCode == null) { + return Optional.empty(); + } + switch (errorCode) { + case "InvalidInput": + return Optional.of(ExceptionMetadata.builder().errorCode("InvalidInput").httpStatusCode(400) + .exceptionBuilderSupplier(InvalidInputException::builder).build()); + default: + return Optional.empty(); + } + }; HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, - operationMetadata); + operationMetadata, exceptionMetadataMapper); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration( secondOpsWithSigv4AndSigv4ASignedPayloadRequest, 
this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, @@ -724,8 +849,8 @@ private static List resolveMetricPublishers(SdkClientConfigurat } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, - JsonOperationMetadata operationMetadata) { - return protocolFactory.createErrorResponseHandler(operationMetadata); + JsonOperationMetadata operationMetadata, Function> exceptionMetadataMapper) { + return protocolFactory.createErrorResponseHandler(operationMetadata, exceptionMetadataMapper); } private void updateRetryStrategyClientConfiguration(SdkClientConfiguration.Builder configuration) { @@ -767,14 +892,8 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, } private > T init(T builder) { - return builder - .clientConfiguration(clientConfiguration) - .defaultServiceExceptionSupplier(DatabaseException::builder) - .protocol(AwsJsonProtocol.REST_JSON) - .protocolVersion("1.1") - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()); + return builder.clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(DatabaseException::builder) + .protocol(AwsJsonProtocol.REST_JSON).protocolVersion("1.1"); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java index 4f777ed50154..b6c21b785a46 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-async-client-class.java @@ -54,6 +54,7 @@ import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; 
import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.xml.internal.ServiceVersionInfo; import software.amazon.awssdk.services.xml.internal.XmlServiceClientConfigurationBuilder; import software.amazon.awssdk.services.xml.model.APostOperationRequest; import software.amazon.awssdk.services.xml.model.APostOperationResponse; @@ -107,7 +108,7 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient { private static final Logger log = LoggerFactory.getLogger(DefaultXmlAsyncClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final AsyncClientHandler clientHandler; @@ -119,7 +120,8 @@ final class DefaultXmlAsyncClient implements XmlAsyncClient { protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsAsyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Xml_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); this.executor = clientConfiguration.option(SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR); } @@ -152,26 +154,26 @@ protected DefaultXmlAsyncClient(SdkClientConfiguration clientConfiguration) { public CompletableFuture aPostOperation(APostOperationRequest aPostOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = 
metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); String hostPrefix = "foo-"; String resolvedHostExpression = "foo-"; CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) - .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).hostPrefixExpression(resolvedHostExpression) + .withMetricCollector(apiCallMetricCollector).withInput(aPostOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -210,28 +212,28 @@ public CompletableFuture aPostOperation(APostOperationRe */ @Override public CompletableFuture aPostOperationWithOutput( - APostOperationWithOutputRequest aPostOperationWithOutputRequest) { + APostOperationWithOutputRequest 
aPostOperationWithOutputRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withInput(aPostOperationWithOutputRequest)); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withInput(aPostOperationWithOutputRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = 
executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -266,29 +268,29 @@ public CompletableFuture aPostOperationWithOut */ @Override public CompletableFuture bearerAuthOperation( - BearerAuthOperationRequest bearerAuthOperationRequest) { + BearerAuthOperationRequest bearerAuthOperationRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) - .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); + .execute(new ClientExecutionParams() + 
.withOperationName("BearerAuthOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).credentialType(CredentialType.TOKEN) + .withMetricCollector(apiCallMetricCollector).withInput(bearerAuthOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -323,51 +325,51 @@ public CompletableFuture bearerAuthOperation( */ @Override public CompletableFuture eventStreamOperation(EventStreamOperationRequest eventStreamOperationRequest, - EventStreamOperationResponseHandler asyncResponseHandler) { + EventStreamOperationResponseHandler asyncResponseHandler) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(eventStreamOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, eventStreamOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "EventStreamOperation"); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) - .build()); + EventStreamOperationResponse::builder, XmlOperationMetadata.builder().hasStreamingSuccessResponse(true) + .build()); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); HttpResponseHandler eventResponseHandler = protocolFactory.createResponseHandler( - EventStreamTaggedUnionPojoSupplier.builder() - .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) - .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) - .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) - .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata - .builder().hasStreamingSuccessResponse(false).build()); + EventStreamTaggedUnionPojoSupplier.builder() + .putSdkPojoSupplier("EventPayloadEvent", EventStream::eventPayloadEventBuilder) + .putSdkPojoSupplier("NonEventPayloadEvent", EventStream::nonEventPayloadEventBuilder) + .putSdkPojoSupplier("SecondEventPayloadEvent", EventStream::secondEventPayloadEventBuilder) + .defaultSdkPojoSupplier(() -> new SdkPojoBuilder(EventStream.UNKNOWN)).build(), XmlOperationMetadata + .builder().hasStreamingSuccessResponse(false).build()); CompletableFuture eventStreamTransformFuture = new CompletableFuture<>(); EventStreamAsyncResponseTransformer asyncResponseTransformer = EventStreamAsyncResponseTransformer - . 
builder().eventStreamResponseHandler(asyncResponseHandler) - .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) - .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) - .serviceName(serviceName()).build(); + . builder().eventStreamResponseHandler(asyncResponseHandler) + .eventResponseHandler(eventResponseHandler).initialResponseHandler(responseHandler) + .exceptionResponseHandler(errorResponseHandler).future(eventStreamTransformFuture).executor(executor) + .serviceName(serviceName()).build(); RestEventStreamAsyncResponseTransformer restAsyncResponseTransformer = RestEventStreamAsyncResponseTransformer - . builder() - .eventStreamAsyncResponseTransformer(asyncResponseTransformer) - .eventStreamResponseHandler(asyncResponseHandler).build(); + . builder() + .eventStreamAsyncResponseTransformer(asyncResponseTransformer) + .eventStreamResponseHandler(asyncResponseHandler).build(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), - restAsyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("EventStreamOperation").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new EventStreamOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withMetricCollector(apiCallMetricCollector).withInput(eventStreamOperationRequest), + restAsyncResponseTransformer); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = 
executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(e)); + () -> asyncResponseHandler.exceptionOccurred(e)); eventStreamTransformFuture.completeExceptionally(e); } metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -376,7 +378,7 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest return CompletableFutureUtils.forwardExceptionTo(eventStreamTransformFuture, executeFuture); } catch (Throwable t) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> asyncResponseHandler.exceptionOccurred(t)); + () -> asyncResponseHandler.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -404,35 +406,35 @@ public CompletableFuture eventStreamOperation(EventStreamOperationRequest */ @Override public CompletableFuture getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withInput(getOperationWithChecksumRequest)); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + 
.requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withInput(getOperationWithChecksumRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -468,31 +470,31 @@ public CompletableFuture getOperationWithCheck */ @Override public CompletableFuture operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()).withInput(operationWithChecksumRequiredRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -527,29 +529,29 @@ public CompletableFuture operationWithChe */ @Override public 
CompletableFuture operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withInput(operationWithNoneAuthTypeRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new 
OperationWithNoneAuthTypeRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withInput(operationWithNoneAuthTypeRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -585,32 +587,32 @@ public CompletableFuture operationWithNoneAut */ @Override public CompletableFuture operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withInput(operationWithRequestCompressionRequest)); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory)) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withInput(operationWithRequestCompressionRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> 
p.publish(apiCallMetricCollector.collect())); @@ -663,19 +665,19 @@ public CompletableFuture operationWithR */ @Override public CompletableFuture putOperationWithChecksum( - PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, - AsyncResponseTransformer asyncResponseTransformer) { + PutOperationWithChecksumRequest putOperationWithChecksumRequest, AsyncRequestBody requestBody, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); if (!isSignerOverridden(clientConfiguration)) { @@ -683,39 +685,40 @@ public CompletableFuture putOperationWithChecksum( } HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - 
new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()).withAsyncRequestBody(requestBody) - .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + 
.withAsyncResponseTransformer(asyncResponseTransformer).withAsyncRequestBody(requestBody) + .withInput(putOperationWithChecksumRequest), asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -725,7 +728,7 @@ public CompletableFuture putOperationWithChecksum( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -758,13 +761,13 @@ public CompletableFuture putOperationWithChecksum( */ @Override public CompletableFuture streamingInputOperation( - StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { + StreamingInputOperationRequest streamingInputOperationRequest, AsyncRequestBody requestBody) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); @@ -773,20 +776,20 @@ public CompletableFuture streamingInputOperatio } HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); CompletableFuture executeFuture = clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withRequestConfiguration(clientConfiguration) - .withProtocolMetadata(protocolMetadata) - .withMarshaller( - AsyncStreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) - .withInput(streamingInputOperationRequest)); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withRequestConfiguration(clientConfiguration) + .withProtocolMetadata(protocolMetadata) + .withMarshaller( + AsyncStreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .asyncRequestBody(requestBody).build()).withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector).withAsyncRequestBody(requestBody) + .withInput(streamingInputOperationRequest)); CompletableFuture whenCompleteFuture = null; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ 
-826,40 +829,41 @@ public CompletableFuture streamingInputOperatio */ @Override public CompletableFuture streamingOutputOperation( - StreamingOutputOperationRequest streamingOutputOperationRequest, - AsyncResponseTransformer asyncResponseTransformer) { + StreamingOutputOperationRequest streamingOutputOperationRequest, + AsyncResponseTransformer asyncResponseTransformer) { SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); Pair, CompletableFuture> pair = AsyncResponseTransformerUtils - .wrapWithEndOfStreamFuture(asyncResponseTransformer); + .wrapWithEndOfStreamFuture(asyncResponseTransformer); asyncResponseTransformer = pair.left(); CompletableFuture endOfStreamFuture = pair.right(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); CompletableFuture executeFuture = clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) - 
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) - .withInput(streamingOutputOperationRequest), asyncResponseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withMetricCollector(apiCallMetricCollector) + .withAsyncResponseTransformer(asyncResponseTransformer).withInput(streamingOutputOperationRequest), + asyncResponseTransformer); CompletableFuture whenCompleteFuture = null; AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; whenCompleteFuture = executeFuture.whenComplete((r, e) -> { if (e != null) { runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(e)); + () -> finalAsyncResponseTransformer.exceptionOccurred(e)); } endOfStreamFuture.whenComplete((r2, e2) -> { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); @@ -869,7 +873,7 @@ public CompletableFuture streamingOutputOperation( } catch (Throwable t) { AsyncResponseTransformer finalAsyncResponseTransformer = asyncResponseTransformer; runAndLogError(log, "Exception thrown in exceptionOccurred callback, ignoring", - () -> finalAsyncResponseTransformer.exceptionOccurred(t)); + () -> finalAsyncResponseTransformer.exceptionOccurred(t)); metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); return CompletableFutureUtils.failedFuture(t); } @@ -887,15 +891,15 @@ public final String serviceName() { private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - 
ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -915,8 +919,8 @@ private T applySignerOverride(T request, Signer signer) { } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java index 9bad15fd6e47..b5cb1ff332d5 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/client/test-xml-client-class.java @@ -42,6 +42,7 @@ 
import software.amazon.awssdk.protocols.xml.AwsXmlProtocolFactory; import software.amazon.awssdk.protocols.xml.XmlOperationMetadata; import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.services.xml.internal.ServiceVersionInfo; import software.amazon.awssdk.services.xml.internal.XmlServiceClientConfigurationBuilder; import software.amazon.awssdk.services.xml.model.APostOperationRequest; import software.amazon.awssdk.services.xml.model.APostOperationResponse; @@ -89,7 +90,7 @@ final class DefaultXmlClient implements XmlClient { private static final Logger log = Logger.loggerFor(DefaultXmlClient.class); private static final AwsProtocolMetadata protocolMetadata = AwsProtocolMetadata.builder() - .serviceProtocol(AwsServiceProtocol.REST_XML).build(); + .serviceProtocol(AwsServiceProtocol.REST_XML).build(); private final SyncClientHandler clientHandler; @@ -99,7 +100,8 @@ final class DefaultXmlClient implements XmlClient { protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { this.clientHandler = new AwsSyncClientHandler(clientConfiguration); - this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this).build(); + this.clientConfiguration = clientConfiguration.toBuilder().option(SdkClientOption.SDK_CLIENT, this) + .option(SdkClientOption.API_METADATA, "Xml_Service" + "#" + ServiceVersionInfo.VERSION).build(); this.protocolFactory = init(); } @@ -125,15 +127,15 @@ protected DefaultXmlClient(SdkClientConfiguration clientConfiguration) { */ @Override public APostOperationResponse aPostOperation(APostOperationRequest aPostOperationRequest) throws InvalidInputException, - AwsServiceException, SdkClientException, XmlException { + AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory.createCombinedResponseHandler( - APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + 
APostOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationRequest, this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperation"); @@ -141,10 +143,10 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio String resolvedHostExpression = "foo-"; return clientHandler.execute(new ClientExecutionParams() - .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) - .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); + .withOperationName("APostOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .hostPrefixExpression(resolvedHostExpression).withRequestConfiguration(clientConfiguration) + .withInput(aPostOperationRequest).withMarshaller(new APostOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -172,28 +174,28 @@ public APostOperationResponse aPostOperation(APostOperationRequest aPostOperatio */ @Override public APostOperationWithOutputResponse aPostOperationWithOutput( - APostOperationWithOutputRequest 
aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, - SdkClientException, XmlException { + APostOperationWithOutputRequest aPostOperationWithOutputRequest) throws InvalidInputException, AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(APostOperationWithOutputResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(aPostOperationWithOutputRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, aPostOperationWithOutputRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "APostOperationWithOutput"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) - .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("APostOperationWithOutput").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(aPostOperationWithOutputRequest) + .withMarshaller(new APostOperationWithOutputRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -217,28 +219,28 @@ public APostOperationWithOutputResponse aPostOperationWithOutput( */ @Override public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationRequest bearerAuthOperationRequest) - throws AwsServiceException, SdkClientException, XmlException { + throws AwsServiceException, SdkClientException, XmlException { bearerAuthOperationRequest = applySignerOverride(bearerAuthOperationRequest, BearerTokenSigner.create()); HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(BearerAuthOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(BearerAuthOperationResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = 
updateSdkClientConfiguration(bearerAuthOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, bearerAuthOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BearerAuthOperation"); return clientHandler.execute(new ClientExecutionParams() - .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) - .withInput(bearerAuthOperationRequest) - .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); + .withOperationName("BearerAuthOperation").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .credentialType(CredentialType.TOKEN).withRequestConfiguration(clientConfiguration) + .withInput(bearerAuthOperationRequest) + .withMarshaller(new BearerAuthOperationRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -262,36 +264,36 @@ public BearerAuthOperationResponse bearerAuthOperation(BearerAuthOperationReques */ @Override public GetOperationWithChecksumResponse getOperationWithChecksum( - GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, - XmlException { + GetOperationWithChecksumRequest getOperationWithChecksumRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> 
responseHandler = protocolFactory - .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(GetOperationWithChecksumResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(getOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, getOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("GetOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(getOperationWithChecksumRequest) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) - .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) - .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) - .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("GetOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + 
.withRequestConfiguration(clientConfiguration) + .withInput(getOperationWithChecksumRequest) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum.builder().requestChecksumRequired(true).isRequestStreaming(false) + .requestAlgorithm(getOperationWithChecksumRequest.checksumAlgorithmAsString()) + .requestAlgorithmHeader("x-amz-sdk-checksum-algorithm").build()) + .withMarshaller(new GetOperationWithChecksumRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -315,33 +317,33 @@ public GetOperationWithChecksumResponse getOperationWithChecksum( */ @Override public OperationWithChecksumRequiredResponse operationWithChecksumRequired( - OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithChecksumRequiredRequest operationWithChecksumRequiredRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithChecksumRequiredResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithChecksumRequiredRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); + operationWithChecksumRequiredRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithChecksumRequired"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithChecksumRequired") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithChecksumRequiredRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, - HttpChecksumRequired.create()) - .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithChecksumRequired") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithChecksumRequiredRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, + HttpChecksumRequired.create()) + .withMarshaller(new OperationWithChecksumRequiredRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -365,29 +367,29 @@ public OperationWithChecksumRequiredResponse operationWithChecksumRequired( */ @Override public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( - OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, - XmlException { + OperationWithNoneAuthTypeRequest operationWithNoneAuthTypeRequest) throws AwsServiceException, SdkClientException, + XmlException { HttpResponseHandler> responseHandler = protocolFactory - 
.createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithNoneAuthTypeResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithNoneAuthTypeRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, operationWithNoneAuthTypeRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithNoneAuthType"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) - .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithNoneAuthType").withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration).withInput(operationWithNoneAuthTypeRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.IS_NONE_AUTH_TYPE_REQUEST, false) + .withMarshaller(new OperationWithNoneAuthTypeRequestMarshaller(protocolFactory))); } finally { 
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -411,33 +413,33 @@ public OperationWithNoneAuthTypeResponse operationWithNoneAuthType( */ @Override public OperationWithRequestCompressionResponse operationWithRequestCompression( - OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, - SdkClientException, XmlException { + OperationWithRequestCompressionRequest operationWithRequestCompressionRequest) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(OperationWithRequestCompressionResponse::builder, + new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(operationWithRequestCompressionRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, - operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); + operationWithRequestCompressionRequest.overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "OperationWithRequestCompression"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("OperationWithRequestCompression") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(operationWithRequestCompressionRequest) - .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, - RequestCompression.builder().encodings("gzip").isStreaming(false).build()) - .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); + .execute(new ClientExecutionParams() + .withOperationName("OperationWithRequestCompression") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(operationWithRequestCompressionRequest) + .putExecutionAttribute(SdkInternalExecutionAttribute.REQUEST_COMPRESSION, + RequestCompression.builder().encodings("gzip").isStreaming(false).build()) + .withMarshaller(new OperationWithRequestCompressionRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -487,47 +489,48 @@ public OperationWithRequestCompressionResponse operationWithRequestCompression( */ @Override public ReturnT putOperationWithChecksum(PutOperationWithChecksumRequest putOperationWithChecksumRequest, - RequestBody requestBody, ResponseTransformer responseTransformer) - throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody, ResponseTransformer responseTransformer) + throws AwsServiceException, 
SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + PutOperationWithChecksumResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(putOperationWithChecksumRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, putOperationWithChecksumRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutOperationWithChecksum"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("PutOperationWithChecksum") - .withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler) - .withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration) - .withInput(putOperationWithChecksumRequest) - .withMetricCollector(apiCallMetricCollector) - .putExecutionAttribute( - SdkInternalExecutionAttribute.HTTP_CHECKSUM, - HttpChecksum - .builder() - .requestChecksumRequired(false) - .isRequestStreaming(true) - .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) - .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, - DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, - DefaultChecksumAlgorithm.SHA256).build()) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - 
.delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("PutOperationWithChecksum") + .withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler) + .withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration) + .withInput(putOperationWithChecksumRequest) + .withMetricCollector(apiCallMetricCollector) + .putExecutionAttribute( + SdkInternalExecutionAttribute.HTTP_CHECKSUM, + HttpChecksum + .builder() + .requestChecksumRequired(false) + .isRequestStreaming(true) + .requestValidationMode(putOperationWithChecksumRequest.checksumModeAsString()) + .responseAlgorithmsV2(DefaultChecksumAlgorithm.CRC32C, + DefaultChecksumAlgorithm.CRC32, DefaultChecksumAlgorithm.SHA1, + DefaultChecksumAlgorithm.SHA256).build()) + .withResponseTransformer(responseTransformer) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new PutOperationWithChecksumRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -562,34 +565,34 @@ public ReturnT putOperationWithChecksum(PutOperationWithChecksumReques */ @Override public StreamingInputOperationResponse streamingInputOperation(StreamingInputOperationRequest streamingInputOperationRequest, - RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { + RequestBody requestBody) throws AwsServiceException, SdkClientException, XmlException { HttpResponseHandler> responseHandler = protocolFactory - .createCombinedResponseHandler(StreamingInputOperationResponse::builder, - new XmlOperationMetadata().withHasStreamingSuccessResponse(false)); + .createCombinedResponseHandler(StreamingInputOperationResponse::builder, + new 
XmlOperationMetadata().withHasStreamingSuccessResponse(false)); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingInputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingInputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingInputOperation"); return clientHandler - .execute(new ClientExecutionParams() - .withOperationName("StreamingInputOperation") - .withProtocolMetadata(protocolMetadata) - .withCombinedResponseHandler(responseHandler) - .withMetricCollector(apiCallMetricCollector) - .withRequestConfiguration(clientConfiguration) - .withInput(streamingInputOperationRequest) - .withRequestBody(requestBody) - .withMarshaller( - StreamingRequestMarshaller.builder() - .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) - .requestBody(requestBody).build())); + .execute(new ClientExecutionParams() + .withOperationName("StreamingInputOperation") + .withProtocolMetadata(protocolMetadata) + .withCombinedResponseHandler(responseHandler) + .withMetricCollector(apiCallMetricCollector) + .withRequestConfiguration(clientConfiguration) + .withInput(streamingInputOperationRequest) + .withRequestBody(requestBody) + .withMarshaller( + StreamingRequestMarshaller.builder() + .delegateMarshaller(new StreamingInputOperationRequestMarshaller(protocolFactory)) + .requestBody(requestBody).build())); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -620,30 +623,30 @@ public StreamingInputOperationResponse 
streamingInputOperation(StreamingInputOpe */ @Override public ReturnT streamingOutputOperation(StreamingOutputOperationRequest streamingOutputOperationRequest, - ResponseTransformer responseTransformer) throws AwsServiceException, - SdkClientException, XmlException { + ResponseTransformer responseTransformer) throws AwsServiceException, + SdkClientException, XmlException { HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( - StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); + StreamingOutputOperationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true)); HttpResponseHandler errorResponseHandler = protocolFactory.createErrorResponseHandler(); SdkClientConfiguration clientConfiguration = updateSdkClientConfiguration(streamingOutputOperationRequest, - this.clientConfiguration); + this.clientConfiguration); List metricPublishers = resolveMetricPublishers(clientConfiguration, streamingOutputOperationRequest - .overrideConfiguration().orElse(null)); + .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector - .create("ApiCall"); + .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Xml Service"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "StreamingOutputOperation"); return clientHandler.execute( - new ClientExecutionParams() - .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) - .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) - .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) - .withMetricCollector(apiCallMetricCollector) - .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); + new ClientExecutionParams() + .withOperationName("StreamingOutputOperation").withProtocolMetadata(protocolMetadata) + .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) + .withRequestConfiguration(clientConfiguration).withInput(streamingOutputOperationRequest) + .withMetricCollector(apiCallMetricCollector).withResponseTransformer(responseTransformer) + .withMarshaller(new StreamingOutputOperationRequestMarshaller(protocolFactory)), responseTransformer); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } @@ -655,8 +658,8 @@ private T applySignerOverride(T request, Signer signer) { } Consumer signerOverride = b -> b.signer(signer).build(); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() - .map(c -> c.toBuilder().applyMutation(signerOverride).build()) - .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); + .map(c -> c.toBuilder().applyMutation(signerOverride).build()) + .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(signerOverride).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @@ -666,7 +669,7 @@ public final String 
serviceName() { } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, - RequestOverrideConfiguration requestOverrideConfiguration) { + RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { publishers = requestOverrideConfiguration.metricPublishers(); @@ -719,11 +722,11 @@ private SdkClientConfiguration updateSdkClientConfiguration(SdkRequest request, private AwsXmlProtocolFactory init() { return AwsXmlProtocolFactory - .builder() - .registerModeledException( - ExceptionMetadata.builder().errorCode("InvalidInput") - .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) - .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); + .builder() + .registerModeledException( + ExceptionMetadata.builder().errorCode("InvalidInput") + .exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build()) + .clientConfiguration(clientConfiguration).defaultServiceExceptionSupplier(XmlException::builder).build(); } @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java index 242803093986..5e3b7f4b4a8c 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesrequest.java @@ -16,6 +16,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; @@ -1661,6 +1663,8 @@ 
private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java index c4ac38e31af6..1140d391e9fa 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesresponse.java @@ -16,6 +16,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -1660,6 +1662,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesunionstructure.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesunionstructure.java index 44c03d1c94ca..24da57f3ae45 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesunionstructure.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/alltypesunionstructure.java @@ -19,6 +19,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -2301,6 +2303,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java index 69dadbca3e85..ff6559aa321a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/basetype.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -163,6 +165,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the BaseMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java index 4db18e92828a..ba330a28a3f1 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenamerequest.java @@ -11,6 +11,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -171,6 +173,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the NewNameNoDeprecation property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java index e27647980743..73722408db39 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/deprecatedrenameresponse.java @@ -10,6 +10,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -158,6 +160,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/emptymodeledexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/emptymodeledexception.java index 453a04251693..69afeae4d1e3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/emptymodeledexception.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/emptymodeledexception.java @@ -4,6 +4,8 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -52,6 +54,8 @@ private static Map> 
memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder, JsonProtocolTestsException.Builder { @Override Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java index 48c7b5544789..1a19b3d7341b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventone.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -148,6 +150,8 @@ public void accept(EventStreamOperationResponseHandler.Visitor visitor) { throw new UnsupportedOperationException(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the Foo property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationrequest.java index 2adf77c42955..a54966a44bb2 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -92,6 +94,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationresponse.java index ee19db4104f1..0dc74eb06af8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -90,6 +92,8 @@ private static Map> 
memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputrequest.java index dfff6b57d1ce..568f8c1dd48a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -92,6 +94,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputresponse.java index 480c1476209b..2ace67990bb6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventstreamoperationwithonlyinputresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import 
software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -88,6 +90,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java index a75b587bb251..24dc77a00fd3 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/eventtwo.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -148,6 +150,8 @@ public void accept(EventStreamOperationResponseHandler.Visitor visitor) { throw new UnsupportedOperationException(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the Bar property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java index 04a1289aa734..355490f7b900 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinternalservererrorexception.java @@ -4,6 +4,8 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -60,6 +62,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder, JsonException.Builder { @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java index 86a1b7271775..d0afd5a44822 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonserviceinvalidinputexception.java @@ -4,6 +4,8 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; 
import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -55,6 +57,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder, JsonException.Builder { @Override Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java index d298beb076a0..bec5900c28d8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/exceptions/jsonservicethrottlingexception.java @@ -4,6 +4,8 @@ import java.util.List; import java.util.Map; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -65,6 +67,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder, JsonException.Builder { @Override Builder awsErrorDetails(AwsErrorDetails awsErrorDetails); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java index 530a844adeb4..65751af4c76d 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingrequest.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -320,6 +322,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java index 19dc77405387..006154fc3e3b 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/existencechecknamingresponse.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -318,6 +320,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java index 167b63bf2865..175a8af52eee 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputevent.java @@ -13,6 +13,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -145,6 +147,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the ExplicitPayloadMember property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java index ce1c91f4e817..1c15d2559dae 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/inputeventtwo.java @@ -13,6 +13,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -190,6 +192,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { 
/** * Sets the value of the ImplicitPayloadMemberOne property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java index ab2cf6014ee4..49dd02400bf8 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersrequest.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -336,6 +338,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the ListOfListOfStrings property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java index 58d3611d065b..9accde6e9eab 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedcontainersresponse.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -334,6 +336,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedqueryparameteroperation.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedqueryparameteroperation.java index 7a0632cf41a2..c935db9281f0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedqueryparameteroperation.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/nestedqueryparameteroperation.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import 
software.amazon.awssdk.core.protocol.MarshallLocation; @@ -160,6 +162,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the QueryParamOne property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java index e92d7b949a35..67433478d1af 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberrequest.java @@ -11,6 +11,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -220,6 +222,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java index e0513534e636..c0ac1a368945 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithdeprecatedmemberresponse.java @@ -10,6 +10,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -188,6 +190,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputrequest.java index 73f1490e3747..c900e1c8f276 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { @Override diff --git 
a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputresponse.java index a40840c0feba..7c62a482840a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/operationwithnoinputoroutputresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -88,6 +90,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationrequest.java index 7359ae356609..6e5452cb2e58 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationrequest.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ 
-352,6 +354,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationresponse.java index 320327e0b254..14376b1d1c97 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/queryparameteroperationresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -88,6 +90,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java index 1678ad1ee11d..43f2e51bc5e7 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/recursivestructtype.java @@ -15,6 +15,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import 
software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -269,6 +271,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the NoRecurse property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java index 3552d6abd9c3..f535c3d73c74 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/serviceclientconfiguration-withchecksum-builder.java @@ -18,6 +18,7 @@ import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.json.JsonServiceClientConfiguration; import software.amazon.awssdk.services.json.auth.scheme.JsonAuthSchemeProvider; @@ -167,6 +168,23 @@ public JsonAuthSchemeProvider authSchemeProvider() { + JsonAuthSchemeProvider.class.getSimpleName()); } + /** + * Sets the value for token provider + */ + @Override + public JsonServiceClientConfiguration.Builder tokenProvider(IdentityProvider tokenProvider) { + config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER, tokenProvider); + return this; + } + + /** + * Gets the value for token provider + */ + @Override + public IdentityProvider tokenProvider() { + return 
config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER); + } + /** * Sets the value for client behavior for response checksum validation */ diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java index 684bef539e58..0d1ee02f28ed 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { @Override Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java index 1342f9e0c370..f27008ef8524 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/getrandompersonresponse.java @@ -12,6 +12,8 @@ import java.util.function.Consumer; import java.util.function.Function; import 
software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -158,6 +160,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, CopyableBuilder { /** diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java index 9ee0ccbce85e..9a994cecca29 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/person.java @@ -13,6 +13,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -180,6 +182,8 @@ public void accept(StreamDeathsResponseHandler.Visitor visitor) { throw new UnsupportedOperationException(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the Name property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java index 87140bca6f0a..1b3c5768d3dd 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { @Override Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java index afce38dd74eb..c9bf1c735f7f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streambirthsresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import 
software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java index 9152f80822dd..0900cc67c2d6 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamRequest.Builder, SdkPojo, CopyableBuilder { @Override Builder overrideConfiguration(AwsRequestOverrideConfiguration overrideConfiguration); diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java index 934e2c889d69..7668ca7f3bdf 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java +++ 
b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/sharedstream/streamdeathsresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SharedEventStreamResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java index 13900f4bfb54..01d8f6391b81 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/simplestruct.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -131,6 +133,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationrequest.java index 35f4f0ad08d6..c6ab8b4fe930 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -92,6 +94,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationresponse.java index d488bcfb5498..f116873bb689 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streaminginputoperationresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -88,6 +90,8 @@ 
private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationrequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationrequest.java index fca35f81081f..de41c746ba36 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationrequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationrequest.java @@ -6,6 +6,8 @@ import java.util.Optional; import java.util.function.Consumer; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsRequest.Builder, SdkPojo, CopyableBuilder { @Override diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationresponse.java index 3ed64ad12800..ef87cb7a7dc0 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/streamingoutputoperationresponse.java @@ -5,6 +5,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import 
software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -90,6 +92,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends JsonProtocolTestsResponse.Builder, SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java index 893e8d789493..976da6079645 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithnestedblobtype.java @@ -12,6 +12,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -134,6 +136,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the NestedBlob property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java index e804703c5080..ac3b4db2541f 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/structwithtimestamp.java @@ -12,6 +12,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -134,6 +136,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the NestedTimestamp property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java index 586339d5a9bd..775dff4a2cf7 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/subtypeone.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -132,6 +134,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the SubTypeOneMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java index c5b447ca8ee8..82eef558b842 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/underscore_name_type.java @@ -6,6 +6,8 @@ import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.utils.ToString; @@ -91,6 +93,8 @@ private static Map> memberNameToFieldInitializer() { return Collections.emptyMap(); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java index 1aa020ae67d9..67174e469879 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespacerequest.java @@ -11,6 +11,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; @@ -198,6 +200,8 @@ private static BiConsumer setter(BiConsumer s) { 
return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends ProtocolRestXmlRequest.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java index 24925471bc41..c4d82f056241 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/testxmlnamespaceresponse.java @@ -11,6 +11,8 @@ import java.util.function.Consumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -197,6 +199,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends ProtocolRestXmlResponse.Builder, SdkPojo, CopyableBuilder { /** * Sets the value of the StringMember property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java index a65c53044429..9f313aa6124a 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/model/xmlnamespace/xmlnamespacemember.java @@ -11,6 +11,8 @@ import java.util.function.BiConsumer; import java.util.function.Function; import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.Mutable; +import software.amazon.awssdk.annotations.NotThreadSafe; import software.amazon.awssdk.core.SdkField; import software.amazon.awssdk.core.SdkPojo; import software.amazon.awssdk.core.protocol.MarshallLocation; @@ -162,6 +164,8 @@ private static BiConsumer setter(BiConsumer s) { return (obj, val) -> s.accept((Builder) obj, val); } + @Mutable + @NotThreadSafe public interface Builder extends SdkPojo, CopyableBuilder { /** * Sets the value of the Type property for this object. 
diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java index 942a770b61a3..684823698760 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-class.java @@ -23,7 +23,9 @@ public final class DefaultQueryEndpointProvider implements QueryEndpointProvider public CompletableFuture resolveEndpoint(QueryEndpointParams params) { Validate.notNull(params.region(), "Parameter 'region' must not be null"); try { - RuleResult result = endpointRule0(params, new LocalState(params.region())); + Region region = params.region(); + String regionId = region == null ? null : region.id(); + RuleResult result = endpointRule0(params, regionId); if (result.canContinue()) { throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); } @@ -40,170 +42,101 @@ public CompletableFuture resolveEndpoint(QueryEndpointParams params) { } } - private static RuleResult endpointRule0(QueryEndpointParams params, LocalState locals) { - return endpointRule1(params, locals); + private static RuleResult endpointRule0(QueryEndpointParams params, String region) { + return endpointRule1(params, region); } - private static RuleResult endpointRule1(QueryEndpointParams params, LocalState locals) { - RulePartition partitionResult = null; - if ((partitionResult = RulesFunctions.awsPartition(locals.region())) != null) { - locals = locals.toBuilder().partitionResult(partitionResult).build(); - RuleResult result = endpointRule2(params, locals); + private static RuleResult endpointRule1(QueryEndpointParams params, String region) { + RulePartition partitionResult = RulesFunctions.awsPartition(region); + if (partitionResult != null) { + RuleResult result = endpointRule2(params, 
partitionResult); if (result.isResolved()) { return result; } - result = endpointRule6(params, locals); + result = endpointRule6(params, region, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule11(params, locals); - if (result.isResolved()) { - return result; + return RuleResult.error(region + " is not a valid HTTP host-label"); + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() + && params.arnList() != null) { + String firstArn = RulesFunctions.listAccess(params.arnList(), 0); + if (firstArn != null) { + RuleArn parsedArn = RulesFunctions.awsParseArn(firstArn); + if (parsedArn != null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + } } - return endpointRule12(params, locals); } return RuleResult.carryOn(); } - private static RuleResult endpointRule2(QueryEndpointParams params, LocalState locals) { + private static RuleResult endpointRule2(QueryEndpointParams params, RulePartition partitionResult) { if (params.endpointId() != null) { - RuleResult result = endpointRule3(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule4(params, locals); - if (result.isResolved()) { - return result; - } - return endpointRule5(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule3(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { - return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule4(QueryEndpointParams params, LocalState locals) { - 
if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule5(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); - } - - private static RuleResult endpointRule6(QueryEndpointParams params, LocalState locals) { - if (RulesFunctions.isValidHostLabel(locals.region(), false)) { - RuleResult result = endpointRule7(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule8(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { + return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); } - result = endpointRule9(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." 
+ partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - return endpointRule10(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule7(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) + .build())).build()); } return RuleResult.carryOn(); } - private static RuleResult endpointRule8(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query." + locals.region() + "." 
+ locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build(), SigV4AuthScheme.builder().signingName("query").signingRegion(locals.region()) - .build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule9(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() - && params.useFipsEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule10(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint.builder() - .url(URI.create("https://query." + locals.region() + "." 
+ locals.partitionResult().dnsSuffix())).build()); - } - - private static RuleResult endpointRule11(QueryEndpointParams params, LocalState locals) { - return RuleResult.error(locals.region() + " is not a valid HTTP host-label"); - } - - private static RuleResult endpointRule12(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() - && params.arnList() != null) { - String firstArn = null; - RuleArn parsedArn = null; - if ((firstArn = RulesFunctions.listAccess(params.arnList(), 0)) != null) { - locals = locals.toBuilder().firstArn(firstArn).build(); - } else { - return RuleResult.carryOn(); + private static RuleResult endpointRule6(QueryEndpointParams params, String region, RulePartition partitionResult) { + if (RulesFunctions.isValidHostLabel(region, false)) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - if ((parsedArn = RulesFunctions.awsParseArn(locals.firstArn())) != null) { - locals = locals.toBuilder().parsedArn(parsedArn).build(); + if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); + .builder() + .url(URI.create("https://query." + region + "." 
+ partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build(), + SigV4AuthScheme.builder().signingName("query").signingRegion(region).build())).build()); } + if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() + && params.useFipsEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint.builder() + .url(URI.create("https://query." + region + "." + partitionResult.dnsSuffix())).build()); } return RuleResult.carryOn(); } @@ -217,107 +150,4 @@ public boolean equals(Object rhs) { public int hashCode() { return getClass().hashCode(); } - - private static final class LocalState { - private final String region; - - private final RulePartition partitionResult; - - private final String firstArn; - - private final RuleArn parsedArn; - - LocalState() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(Region region) { - if (region != null) { - this.region = region.id(); - } else { - this.region = null; - } - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(LocalStateBuilder builder) { - this.region = builder.region; - this.partitionResult = builder.partitionResult; - this.firstArn = builder.firstArn; - this.parsedArn = builder.parsedArn; - } - - public String region() { - return this.region; - } - - public RulePartition partitionResult() { - return this.partitionResult; - } - - public String firstArn() { - return this.firstArn; - } - - public 
RuleArn parsedArn() { - return this.parsedArn; - } - - public LocalStateBuilder toBuilder() { - return new LocalStateBuilder(this); - } - } - - private static final class LocalStateBuilder { - private String region; - - private RulePartition partitionResult; - - private String firstArn; - - private RuleArn parsedArn; - - LocalStateBuilder() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalStateBuilder(LocalState locals) { - this.region = locals.region; - this.partitionResult = locals.partitionResult; - this.firstArn = locals.firstArn; - this.parsedArn = locals.parsedArn; - } - - public LocalStateBuilder region(String value) { - this.region = value; - return this; - } - - public LocalStateBuilder partitionResult(RulePartition value) { - this.partitionResult = value; - return this; - } - - public LocalStateBuilder firstArn(String value) { - this.firstArn = value; - return this; - } - - public LocalStateBuilder parsedArn(RuleArn value) { - this.parsedArn = value; - return this; - } - - LocalState build() { - return new LocalState(this); - } - } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java index 942a770b61a3..684823698760 100644 --- a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-know-prop-override-class.java @@ -23,7 +23,9 @@ public final class DefaultQueryEndpointProvider implements QueryEndpointProvider public CompletableFuture resolveEndpoint(QueryEndpointParams params) { Validate.notNull(params.region(), "Parameter 'region' must not be null"); try { - RuleResult result = endpointRule0(params, new LocalState(params.region())); + 
Region region = params.region(); + String regionId = region == null ? null : region.id(); + RuleResult result = endpointRule0(params, regionId); if (result.canContinue()) { throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); } @@ -40,170 +42,101 @@ public CompletableFuture resolveEndpoint(QueryEndpointParams params) { } } - private static RuleResult endpointRule0(QueryEndpointParams params, LocalState locals) { - return endpointRule1(params, locals); + private static RuleResult endpointRule0(QueryEndpointParams params, String region) { + return endpointRule1(params, region); } - private static RuleResult endpointRule1(QueryEndpointParams params, LocalState locals) { - RulePartition partitionResult = null; - if ((partitionResult = RulesFunctions.awsPartition(locals.region())) != null) { - locals = locals.toBuilder().partitionResult(partitionResult).build(); - RuleResult result = endpointRule2(params, locals); + private static RuleResult endpointRule1(QueryEndpointParams params, String region) { + RulePartition partitionResult = RulesFunctions.awsPartition(region); + if (partitionResult != null) { + RuleResult result = endpointRule2(params, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule6(params, locals); + result = endpointRule6(params, region, partitionResult); if (result.isResolved()) { return result; } - result = endpointRule11(params, locals); - if (result.isResolved()) { - return result; + return RuleResult.error(region + " is not a valid HTTP host-label"); + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() + && params.arnList() != null) { + String firstArn = RulesFunctions.listAccess(params.arnList(), 0); + if (firstArn != null) { + RuleArn parsedArn = RulesFunctions.awsParseArn(firstArn); + if (parsedArn != null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + 
".query." + + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + } } - return endpointRule12(params, locals); } return RuleResult.carryOn(); } - private static RuleResult endpointRule2(QueryEndpointParams params, LocalState locals) { + private static RuleResult endpointRule2(QueryEndpointParams params, RulePartition partitionResult) { if (params.endpointId() != null) { - RuleResult result = endpointRule3(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule4(params, locals); - if (result.isResolved()) { - return result; - } - return endpointRule5(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule3(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { - return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule4(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule5(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." 
+ locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); - } - - private static RuleResult endpointRule6(QueryEndpointParams params, LocalState locals) { - if (RulesFunctions.isValidHostLabel(locals.region(), false)) { - RuleResult result = endpointRule7(params, locals); - if (result.isResolved()) { - return result; - } - result = endpointRule8(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { + return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); } - result = endpointRule9(params, locals); - if (result.isResolved()) { - return result; + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - return endpointRule10(params, locals); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule7(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." + locals.partitionResult().dnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build())).build()); + .builder() + .url(URI.create("https://" + params.endpointId() + ".query." 
+ partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) + .build())).build()); } return RuleResult.carryOn(); } - private static RuleResult endpointRule8(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { - return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://query." + locals.region() + "." + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) - .build(), SigV4AuthScheme.builder().signingName("query").signingRegion(locals.region()) - .build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule9(QueryEndpointParams params, LocalState locals) { - if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() - && params.useFipsEndpoint()) { - return RuleResult - .endpoint(Endpoint - .builder() - .url(URI.create("https://query-fips." + locals.region() + "." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); - } - return RuleResult.carryOn(); - } - - private static RuleResult endpointRule10(QueryEndpointParams params, LocalState locals) { - return RuleResult.endpoint(Endpoint.builder() - .url(URI.create("https://query." + locals.region() + "." 
+ locals.partitionResult().dnsSuffix())).build()); - } - - private static RuleResult endpointRule11(QueryEndpointParams params, LocalState locals) { - return RuleResult.error(locals.region() + " is not a valid HTTP host-label"); - } - - private static RuleResult endpointRule12(QueryEndpointParams params, LocalState locals) { - if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() - && params.arnList() != null) { - String firstArn = null; - RuleArn parsedArn = null; - if ((firstArn = RulesFunctions.listAccess(params.arnList(), 0)) != null) { - locals = locals.toBuilder().firstArn(firstArn).build(); - } else { - return RuleResult.carryOn(); + private static RuleResult endpointRule6(QueryEndpointParams params, String region, RulePartition partitionResult) { + if (RulesFunctions.isValidHostLabel(region, false)) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); } - if ((parsedArn = RulesFunctions.awsParseArn(locals.firstArn())) != null) { - locals = locals.toBuilder().parsedArn(parsedArn).build(); + if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { return RuleResult.endpoint(Endpoint - .builder() - .url(URI.create("https://" + params.endpointId() + ".query." - + locals.partitionResult().dualStackDnsSuffix())) - .putAttribute( - AwsEndpointAttribute.AUTH_SCHEMES, - Arrays.asList(SigV4aAuthScheme.builder().signingName("query") - .signingRegionSet(Arrays.asList("*")).build())).build()); + .builder() + .url(URI.create("https://query." + region + "." 
+ partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build(), + SigV4AuthScheme.builder().signingName("query").signingRegion(region).build())).build()); } + if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() + && params.useFipsEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(URI.create("https://query-fips." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint.builder() + .url(URI.create("https://query." + region + "." + partitionResult.dnsSuffix())).build()); } return RuleResult.carryOn(); } @@ -217,107 +150,4 @@ public boolean equals(Object rhs) { public int hashCode() { return getClass().hashCode(); } - - private static final class LocalState { - private final String region; - - private final RulePartition partitionResult; - - private final String firstArn; - - private final RuleArn parsedArn; - - LocalState() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(Region region) { - if (region != null) { - this.region = region.id(); - } else { - this.region = null; - } - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalState(LocalStateBuilder builder) { - this.region = builder.region; - this.partitionResult = builder.partitionResult; - this.firstArn = builder.firstArn; - this.parsedArn = builder.parsedArn; - } - - public String region() { - return this.region; - } - - public RulePartition partitionResult() { - return this.partitionResult; - } - - public String firstArn() { - return this.firstArn; - } - - public 
RuleArn parsedArn() { - return this.parsedArn; - } - - public LocalStateBuilder toBuilder() { - return new LocalStateBuilder(this); - } - } - - private static final class LocalStateBuilder { - private String region; - - private RulePartition partitionResult; - - private String firstArn; - - private RuleArn parsedArn; - - LocalStateBuilder() { - this.region = null; - this.partitionResult = null; - this.firstArn = null; - this.parsedArn = null; - } - - LocalStateBuilder(LocalState locals) { - this.region = locals.region; - this.partitionResult = locals.partitionResult; - this.firstArn = locals.firstArn; - this.parsedArn = locals.parsedArn; - } - - public LocalStateBuilder region(String value) { - this.region = value; - return this; - } - - public LocalStateBuilder partitionResult(RulePartition value) { - this.partitionResult = value; - return this; - } - - public LocalStateBuilder firstArn(String value) { - this.firstArn = value; - return this; - } - - public LocalStateBuilder parsedArn(RuleArn value) { - this.parsedArn = value; - return this; - } - - LocalState build() { - return new LocalState(this); - } - } } diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-unknown-property-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-unknown-property-class.java new file mode 100644 index 000000000000..18f0c5cb6e42 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-unknown-property-class.java @@ -0,0 +1,60 @@ +package software.amazon.awssdk.services.query.endpoints.internal; + +import java.net.URI; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.endpoints.Endpoint; +import 
software.amazon.awssdk.services.query.endpoints.QueryEndpointParams; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CompletableFutureUtils; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class DefaultQueryEndpointProvider implements QueryEndpointProvider { + @Override + public CompletableFuture resolveEndpoint(QueryEndpointParams params) { + try { + RuleResult result = endpointRule0(params); + if (result.canContinue()) { + throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); + } + if (result.isError()) { + String errorMsg = result.error(); + if (errorMsg.contains("Invalid ARN") && errorMsg.contains(":s3:::")) { + errorMsg += ". Use the bucket name instead of simple bucket ARNs in GetBucketLocationRequest."; + } + throw SdkClientException.create(errorMsg); + } + return CompletableFuture.completedFuture(result.endpoint()); + } catch (Exception error) { + return CompletableFutureUtils.failedFuture(error); + } + } + + private static RuleResult endpointRule0(QueryEndpointParams params) { + RuleResult result = endpointRule1(params); + if (result.isResolved()) { + return result; + } + return RuleResult.error("Invalid Configuration: Missing Endpoint"); + } + + private static RuleResult endpointRule1(QueryEndpointParams params) { + if (params.endpoint() != null) { + return RuleResult.endpoint(Endpoint.builder().url(URI.create(params.endpoint())).build()); + } + return RuleResult.carryOn(); + } + + @Override + public boolean equals(Object rhs) { + return rhs != null && getClass().equals(rhs.getClass()); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-uri-cache-class.java b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-uri-cache-class.java new file mode 100644 index 
000000000000..53eb66b3c147 --- /dev/null +++ b/codegen/src/test/resources/software/amazon/awssdk/codegen/poet/rules2/endpoint-provider-uri-cache-class.java @@ -0,0 +1,155 @@ +package software.amazon.awssdk.services.query.endpoints.internal; + +import java.util.Arrays; +import java.util.concurrent.CompletableFuture; +import software.amazon.awssdk.annotations.Generated; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.awscore.endpoints.AwsEndpointAttribute; +import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4AuthScheme; +import software.amazon.awssdk.awscore.endpoints.authscheme.SigV4aAuthScheme; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.endpoints.Endpoint; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointParams; +import software.amazon.awssdk.services.query.endpoints.QueryEndpointProvider; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.uri.SdkUri; + +@Generated("software.amazon.awssdk:codegen") +@SdkInternalApi +public final class DefaultQueryEndpointProvider implements QueryEndpointProvider { + @Override + public CompletableFuture resolveEndpoint(QueryEndpointParams params) { + Validate.notNull(params.region(), "Parameter 'region' must not be null"); + try { + Region region = params.region(); + String regionId = region == null ? null : region.id(); + RuleResult result = endpointRule0(params, regionId); + if (result.canContinue()) { + throw SdkClientException.create("Rule engine did not reach an error or endpoint result"); + } + if (result.isError()) { + String errorMsg = result.error(); + if (errorMsg.contains("Invalid ARN") && errorMsg.contains(":s3:::")) { + errorMsg += ". 
Use the bucket name instead of simple bucket ARNs in GetBucketLocationRequest."; + } + throw SdkClientException.create(errorMsg); + } + return CompletableFuture.completedFuture(result.endpoint()); + } catch (Exception error) { + return CompletableFutureUtils.failedFuture(error); + } + } + + private static RuleResult endpointRule0(QueryEndpointParams params, String region) { + return endpointRule1(params, region); + } + + private static RuleResult endpointRule1(QueryEndpointParams params, String region) { + RulePartition partitionResult = RulesFunctions.awsPartition(region); + if (partitionResult != null) { + RuleResult result = endpointRule2(params, partitionResult); + if (result.isResolved()) { + return result; + } + result = endpointRule6(params, region, partitionResult); + if (result.isResolved()) { + return result; + } + return RuleResult.error(region + " is not a valid HTTP host-label"); + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint() + && params.arnList() != null) { + String firstArn = RulesFunctions.listAccess(params.arnList(), 0); + if (firstArn != null) { + RuleArn parsedArn = RulesFunctions.awsParseArn(firstArn); + if (parsedArn != null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create( + "https://" + params.endpointId() + ".query." 
+ partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + } + } + } + return RuleResult.carryOn(); + } + + private static RuleResult endpointRule2(QueryEndpointParams params, RulePartition partitionResult) { + if (params.endpointId() != null) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint()) { + return RuleResult.error("FIPS endpoints not supported with multi-region endpoints"); + } + if (params.useFipsEndpoint() == null && params.useDualStackEndpoint() != null && params.useDualStackEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create( + "https://" + params.endpointId() + ".query." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create("https://" + params.endpointId() + ".query." + partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query").signingRegionSet(Arrays.asList("*")) + .build())).build()); + } + return RuleResult.carryOn(); + } + + private static RuleResult endpointRule6(QueryEndpointParams params, String region, RulePartition partitionResult) { + if (RulesFunctions.isValidHostLabel(region, false)) { + if (params.useFipsEndpoint() != null && params.useFipsEndpoint() && params.useDualStackEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create("https://query-fips." + region + "." 
+ partitionResult.dnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + if (params.useDualStackEndpoint() != null && params.useDualStackEndpoint() && params.useFipsEndpoint() == null) { + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create("https://query." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build(), + SigV4AuthScheme.builder().signingName("query").signingRegion(region).build())).build()); + } + if (params.useDualStackEndpoint() != null && params.useFipsEndpoint() != null && params.useDualStackEndpoint() + && params.useFipsEndpoint()) { + return RuleResult.endpoint(Endpoint + .builder() + .url(SdkUri.getInstance().create( + "https://query-fips." + region + "." + partitionResult.dualStackDnsSuffix())) + .putAttribute( + AwsEndpointAttribute.AUTH_SCHEMES, + Arrays.asList(SigV4aAuthScheme.builder().signingName("query") + .signingRegionSet(Arrays.asList("*")).build())).build()); + } + return RuleResult.endpoint(Endpoint.builder() + .url(SdkUri.getInstance().create("https://query." + region + "." 
+ partitionResult.dnsSuffix())).build()); + } + return RuleResult.carryOn(); + } + + @Override + public boolean equals(Object rhs) { + return rhs != null && getClass().equals(rhs.getClass()); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/core/annotations/pom.xml b/core/annotations/pom.xml index 68448807d0b9..73be6389470d 100644 --- a/core/annotations/pom.xml +++ b/core/annotations/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/arns/pom.xml b/core/arns/pom.xml index 7e8003db98ba..7a59d4d5a035 100644 --- a/core/arns/pom.xml +++ b/core/arns/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java b/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java index 18ac2c8f49f4..dada19e8557e 100644 --- a/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java +++ b/core/arns/src/main/java/software/amazon/awssdk/arns/Arn.java @@ -138,6 +138,23 @@ public static Builder builder() { return new DefaultBuilder(); } + /** + * Attempts to parse the given string into an {@link Arn}. If the input string is not a valid ARN, + * this method returns {@link Optional#empty()} instead of throwing an exception. + *

    + * When successful, the resource is accessible entirely as a string through + * {@link #resourceAsString()}. Where correctly formatted, a parsed resource + * containing resource type, resource and qualifier is available through + * {@link #resource()}. + * + * @param arn A string containing an ARN to parse. + * @return An {@link Optional} containing the parsed {@link Arn} if valid, or empty if invalid. + * @throws IllegalArgumentException if the ARN contains empty partition or service fields + */ + public static Optional tryFromString(String arn) { + return parseArn(arn, false); + } + /** * Parses a given string into an {@link Arn}. The resource is accessible entirely as a * string through {@link #resourceAsString()}. Where correctly formatted, a parsed @@ -148,47 +165,75 @@ public static Builder builder() { * @return {@link Arn} - A modeled Arn. */ public static Arn fromString(String arn) { + return parseArn(arn, true).orElseThrow(() -> new IllegalArgumentException("ARN parsing failed")); + } + + private static Optional parseArn(String arn, boolean throwOnError) { + if (arn == null) { + return Optional.empty(); + } + int arnColonIndex = arn.indexOf(':'); if (arnColonIndex < 0 || !"arn".equals(arn.substring(0, arnColonIndex))) { - throw new IllegalArgumentException("Malformed ARN - doesn't start with 'arn:'"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - doesn't start with 'arn:'"); + } + return Optional.empty(); } int partitionColonIndex = arn.indexOf(':', arnColonIndex + 1); if (partitionColonIndex < 0) { - throw new IllegalArgumentException("Malformed ARN - no AWS partition specified"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - no AWS partition specified"); + } + return Optional.empty(); } String partition = arn.substring(arnColonIndex + 1, partitionColonIndex); int serviceColonIndex = arn.indexOf(':', partitionColonIndex + 1); if (serviceColonIndex < 0) { - throw new 
IllegalArgumentException("Malformed ARN - no service specified"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - no service specified"); + } + return Optional.empty(); } String service = arn.substring(partitionColonIndex + 1, serviceColonIndex); int regionColonIndex = arn.indexOf(':', serviceColonIndex + 1); if (regionColonIndex < 0) { - throw new IllegalArgumentException("Malformed ARN - no AWS region partition specified"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - no AWS region partition specified"); + } + return Optional.empty(); } String region = arn.substring(serviceColonIndex + 1, regionColonIndex); int accountColonIndex = arn.indexOf(':', regionColonIndex + 1); if (accountColonIndex < 0) { - throw new IllegalArgumentException("Malformed ARN - no AWS account specified"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - no AWS account specified"); + } + return Optional.empty(); } String accountId = arn.substring(regionColonIndex + 1, accountColonIndex); String resource = arn.substring(accountColonIndex + 1); if (resource.isEmpty()) { - throw new IllegalArgumentException("Malformed ARN - no resource specified"); + if (throwOnError) { + throw new IllegalArgumentException("Malformed ARN - no resource specified"); + } + return Optional.empty(); } - return Arn.builder() - .partition(partition) - .service(service) - .region(region) - .accountId(accountId) - .resource(resource) - .build(); + Arn resultArn = builder() + .partition(partition) + .service(service) + .region(region) + .accountId(accountId) + .resource(resource) + .build(); + + return Optional.of(resultArn); } @Override diff --git a/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java index 0e4bcfd68f0b..e95903e03cb9 100644 --- a/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java +++ 
b/core/arns/src/test/java/software/amazon/awssdk/arns/ArnTest.java @@ -17,8 +17,14 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.assertThrows; +import java.util.Optional; +import java.util.stream.Stream; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; public class ArnTest { @@ -311,4 +317,78 @@ public void invalidArnWithoutAccountId_ThrowsIllegalArgumentException() { String arnString = "arn:aws:s3:us-east-1:"; assertThatThrownBy(() -> Arn.fromString(arnString)).hasMessageContaining("Malformed ARN"); } + + private static Stream validArnTestCases() { + return Stream.of( + Arguments.of("Basic resource", "arn:aws:s3:us-east-1:12345678910:myresource"), + Arguments.of("Minimal requirements", "arn:aws:foobar:::myresource"), + Arguments.of("Qualified resource", "arn:aws:s3:us-east-1:12345678910:myresource:foobar:1"), + Arguments.of("Minimal resources", "arn:aws:s3:::bucket"), + Arguments.of("Without region", "arn:aws:iam::123456789012:root"), + Arguments.of("Resource type and resource", "arn:aws:s3:us-east-1:12345678910:bucket:foobar"), + Arguments.of("Resource type And resource and qualifier", + "arn:aws:s3:us-east-1:12345678910:bucket:foobar:1"), + Arguments.of("Resource type And resource with slash", "arn:aws:s3:us-east-1:12345678910:bucket/foobar"), + Arguments.of("Resource type and resource and qualifier slash", + "arn:aws:s3:us-east-1:12345678910:bucket/foobar/1"), + Arguments.of("Without region", "arn:aws:s3::123456789012:myresource"), + Arguments.of("Without accountId", "arn:aws:s3:us-east-1::myresource"), + Arguments.of("Resource with dots", "arn:aws:s3:us-east-1:12345678910:myresource:foobar.1") + ); + } + + private static Stream invalidArnTestCases() { + return Stream.of( + 
Arguments.of("Without resource", "arn:aws:s3:us-east-1:12345678910:"), + Arguments.of("Invalid arn", "arn:aws:"), + Arguments.of("Doesn't start with arn", "fakearn:aws:"), + Arguments.of("Invalid without partition", "arn:"), + Arguments.of("Invalid without service", "arn:aws:"), + Arguments.of("Invalid without region", "arn:aws:s3:"), + Arguments.of("Invalid without accountId", "arn:aws:s3:us-east-1:"), + Arguments.of("Null Arn", null) + ); + } + + private static Stream exceptionThrowingArnTestCases() { + return Stream.of( + Arguments.of("Valid without partition", "arn::s3:us-east-1:12345678910:myresource"), + Arguments.of("Valid without service", "arn:aws::us-east-1:12345678910:myresource") + ); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("validArnTestCases") + public void optionalArnFromString_ValidArns_ReturnsPopulatedOptional(String testName, String arnString) { + Optional optionalArn = Arn.tryFromString(arnString); + + assertThat(optionalArn).isPresent(); + + Arn expectedArn = Arn.fromString(arnString); + Arn actualArn = optionalArn.get(); + + assertThat(actualArn.partition()).isEqualTo(expectedArn.partition()); + assertThat(actualArn.service()).isEqualTo(expectedArn.service()); + assertThat(actualArn.region()).isEqualTo(expectedArn.region()); + assertThat(actualArn.accountId()).isEqualTo(expectedArn.accountId()); + assertThat(actualArn.resourceAsString()).isEqualTo(expectedArn.resourceAsString()); + + assertThat(actualArn.toString()).isEqualTo(arnString); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("invalidArnTestCases") + public void optionalArnFromString_InvalidArns_ReturnsEmptyOptional(String testName, String arnString) { + Optional optionalArn = Arn.tryFromString(arnString); + assertThat(optionalArn).isEmpty(); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("exceptionThrowingArnTestCases") + public void tryFromString_InvalidArns_ShouldThrowExceptions(String testName, String arnString) { + 
assertThrows(IllegalArgumentException.class, () -> { + Arn.tryFromString(arnString); + }); + } + } diff --git a/core/auth-crt/pom.xml b/core/auth-crt/pom.xml index d3768bea7fc6..373b9b51fb04 100644 --- a/core/auth-crt/pom.xml +++ b/core/auth-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT auth-crt diff --git a/core/auth-crt/src/main/java/software/amazon/awssdk/authcrt/signer/internal/CrtHttpRequestConverter.java b/core/auth-crt/src/main/java/software/amazon/awssdk/authcrt/signer/internal/CrtHttpRequestConverter.java index 89f3541348cd..2047e88f5bad 100644 --- a/core/auth-crt/src/main/java/software/amazon/awssdk/authcrt/signer/internal/CrtHttpRequestConverter.java +++ b/core/auth-crt/src/main/java/software/amazon/awssdk/authcrt/signer/internal/CrtHttpRequestConverter.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.utils.StringUtils; import software.amazon.awssdk.utils.http.SdkHttpUtils; +import software.amazon.awssdk.utils.uri.SdkUri; @SdkInternalApi public final class CrtHttpRequestConverter { @@ -77,7 +78,7 @@ public SdkHttpFullRequest crtRequestToHttp(SdkHttpFullRequest inputRequest, Http String portString = SdkHttpUtils.isUsingStandardPort(builder.protocol(), builder.port()) ? 
"" : ":" + builder.port(); String encodedPath = encodedPathFromCrtFormat(inputRequest.encodedPath(), signedCrtRequest.getEncodedPath()); String fullUriString = builder.protocol() + "://" + builder.host() + portString + encodedPath; - fullUri = new URI(fullUriString); + fullUri = SdkUri.getInstance().newUri(fullUriString); } catch (URISyntaxException e) { return null; } diff --git a/core/auth/pom.xml b/core/auth/pom.xml index f564c70d0c4e..c2242f59e9e1 100644 --- a/core/auth/pom.xml +++ b/core/auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT auth diff --git a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java index c0e46006c2a3..7b159a6a9489 100644 --- a/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java +++ b/core/auth/src/main/java/software/amazon/awssdk/auth/credentials/DefaultCredentialsProvider.java @@ -74,10 +74,15 @@ private DefaultCredentialsProvider(Builder builder) { } /** - * Returns the singleton instance of the {@link DefaultCredentialsProvider} using the default configuration. - * Configuration can be specified by creating an instance using the {@link #builder()}. If you want to + * Returns the singleton instance of the {@link DefaultCredentialsProvider} using the default configuration. + * Configuration can be specified by creating an instance using the {@link #builder()}. If you want to * create a new instance, use {@link #builder()} instead. + * + * @deprecated The create() method that returns a singleton instance which can cause issues if one client closes the provider + * while others are still using it. Use {@code builder().build()} to create independent instances, which is the + * safer approach and recommended for most use cases. 
*/ + @Deprecated public static DefaultCredentialsProvider create() { return DEFAULT_CREDENTIALS_PROVIDER; } diff --git a/core/aws-core/pom.xml b/core/aws-core/pom.xml index 9980350faca0..ebf1f073fcf3 100644 --- a/core/aws-core/pom.xml +++ b/core/aws-core/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT aws-core diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java index cde970af6402..a73691ab1df1 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/AwsRequestOverrideConfiguration.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.core.RequestOverrideConfiguration; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.utils.builder.SdkBuilder; /** @@ -31,10 +32,12 @@ @SdkPublicApi public final class AwsRequestOverrideConfiguration extends RequestOverrideConfiguration { private final IdentityProvider credentialsProvider; + private final IdentityProvider tokenIdentityProvider; private AwsRequestOverrideConfiguration(BuilderImpl builder) { super(builder); this.credentialsProvider = builder.awsCredentialsProvider; + this.tokenIdentityProvider = builder.tokenIdentityProvider; } /** @@ -75,6 +78,16 @@ public Optional> credentialsI return Optional.ofNullable(credentialsProvider); } + /** + * The optional {@link IdentityProvider} that will provide a token identity to be used to + * authenticate this request. + * + * @return The optional {@link IdentityProvider}. 
+ */ + public Optional> tokenIdentityProvider() { + return Optional.ofNullable(tokenIdentityProvider); + } + @Override public Builder toBuilder() { return new BuilderImpl(this); @@ -97,7 +110,8 @@ public boolean equals(Object o) { return false; } AwsRequestOverrideConfiguration that = (AwsRequestOverrideConfiguration) o; - return Objects.equals(credentialsProvider, that.credentialsProvider); + return Objects.equals(credentialsProvider, that.credentialsProvider) && + Objects.equals(tokenIdentityProvider, that.tokenIdentityProvider); } @Override @@ -105,6 +119,7 @@ public int hashCode() { int hashCode = 1; hashCode = 31 * hashCode + super.hashCode(); hashCode = 31 * hashCode + Objects.hashCode(credentialsProvider); + hashCode = 31 * hashCode + Objects.hashCode(tokenIdentityProvider); return hashCode; } @@ -139,6 +154,17 @@ default Builder credentialsProvider(IdentityProvider} that will provide a token identity to be used + * to authenticate this request. + * + * @param tokenIdentityProvider The {@link IdentityProvider}. + * @return This object for chaining. 
+ */ + default Builder tokenIdentityProvider(IdentityProvider tokenIdentityProvider) { + throw new UnsupportedOperationException(); + } + @Override AwsRequestOverrideConfiguration build(); } @@ -146,6 +172,7 @@ default Builder credentialsProvider(IdentityProvider implements Builder { private IdentityProvider awsCredentialsProvider; + private IdentityProvider tokenIdentityProvider; private BuilderImpl() { } @@ -157,6 +184,7 @@ private BuilderImpl(RequestOverrideConfiguration requestOverrideConfiguration) { private BuilderImpl(AwsRequestOverrideConfiguration awsRequestOverrideConfig) { super(awsRequestOverrideConfig); this.awsCredentialsProvider = awsRequestOverrideConfig.credentialsProvider; + this.tokenIdentityProvider = awsRequestOverrideConfig.tokenIdentityProvider; } @Override @@ -170,6 +198,12 @@ public AwsCredentialsProvider credentialsProvider() { return CredentialUtils.toCredentialsProvider(awsCredentialsProvider); } + @Override + public Builder tokenIdentityProvider(IdentityProvider tokenIdentityProvider) { + this.tokenIdentityProvider = tokenIdentityProvider; + return this; + } + @Override public AwsRequestOverrideConfiguration build() { return new AwsRequestOverrideConfiguration(this); diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java new file mode 100644 index 000000000000..5e7fb58f0d96 --- /dev/null +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/auth/AuthSchemePreferenceResolver.java @@ -0,0 +1,123 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.awscore.auth; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.Profile; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.utils.CollectionUtils; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.Validate; + +/** + * A resolver for the default value of auth scheme preference. This checks environment variables, + * system properties and the profile file for the relevant configuration options when + * {@link #resolveAuthSchemePreference()} is invoked. + */ +@SdkProtectedApi +public final class AuthSchemePreferenceResolver { + private final Supplier profileFile; + private final String profileName; + + private AuthSchemePreferenceResolver(Builder builder) { + this.profileFile = Validate.getOrDefault(builder.profileFile, () -> ProfileFile::defaultProfileFile); + this.profileName = Validate.getOrDefault(builder.profileName, + ProfileFileSystemSetting.AWS_PROFILE::getStringValueOrThrow); + } + + public static Builder builder() { + return new Builder(); + } + + /** + * Resolve the auth scheme preference based on the following order of precedence: + * 1. System settings (jvm and then environment). + * 2. Profile file + * + * @return The resolved, ordered list of auth scheme preferences or an empty list if no values are found. 
+ */ + public List resolveAuthSchemePreference() { + List systemSettingList = fromSystemSetting(); + if (!CollectionUtils.isNullOrEmpty(systemSettingList)) { + return systemSettingList; + } + + List profileFilePrefList = fromProfileFile(); + if (!CollectionUtils.isNullOrEmpty(profileFilePrefList)) { + return profileFilePrefList; + } + + return Collections.emptyList(); + } + + private List fromSystemSetting() { + Optional value = SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.getStringValue(); + if (value.isPresent()) { + return parseAuthSchemeList(value.get()); + } + return Collections.emptyList(); + } + + private List fromProfileFile() { + ProfileFile profileFile = this.profileFile.get(); + + Optional profile = profileFile.profile(profileName); + + String unformattedAuthSchemePreferenceList = + profile + .flatMap(p -> p.property(ProfileProperty.AUTH_SCHEME_PREFERENCE)) + .orElse(null); + + return unformattedAuthSchemePreferenceList != null + ? parseAuthSchemeList(unformattedAuthSchemePreferenceList) + : Collections.emptyList(); + } + + public static final class Builder { + private Supplier profileFile; + private String profileName; + + public AuthSchemePreferenceResolver.Builder profileFile(Supplier profileFile) { + this.profileFile = profileFile; + return this; + } + + public AuthSchemePreferenceResolver.Builder profileName(String profileName) { + this.profileName = profileName; + return this; + } + + public AuthSchemePreferenceResolver build() { + return new AuthSchemePreferenceResolver(this); + } + } + + private static List parseAuthSchemeList(String unformattedList) { + if (StringUtils.isEmpty(unformattedList)) { + return Collections.emptyList(); + } + + return Arrays.asList(unformattedList.replaceAll("\\s+", "").split(",")); + } +} diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/AwsClientEndpointProvider.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/AwsClientEndpointProvider.java index 
fbee6e0ec47f..0bb5119b369e 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/AwsClientEndpointProvider.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/endpoint/AwsClientEndpointProvider.java @@ -41,6 +41,7 @@ import software.amazon.awssdk.utils.ToString; import software.amazon.awssdk.utils.Validate; import software.amazon.awssdk.utils.internal.SystemSettingUtils; +import software.amazon.awssdk.utils.uri.SdkUri; /** * An implementation of {@link ClientEndpointProvider} that loads the default client endpoint from: @@ -238,7 +239,7 @@ private Optional clientEndpointFromServiceMetadata(Builder build .region(builder.region) .tags(endpointTags) .build()); - URI endpoint = URI.create(builder.protocol + "://" + endpointWithoutProtocol); + URI endpoint = SdkUri.getInstance().create(builder.protocol + "://" + endpointWithoutProtocol); if (endpoint.getHost() == null) { String error = "Configured region (" + builder.region + ") and tags (" + endpointTags + ") resulted in " + "an invalid URI: " + endpoint + ". 
This is usually caused by an invalid region " @@ -260,7 +261,7 @@ private Optional clientEndpointFromServiceMetadata(Builder build private Optional createUri(String source, Optional uri) { return uri.map(u -> { try { - URI parsedUri = new URI(uri.get()); + URI parsedUri = SdkUri.getInstance().newUri(uri.get()); log.trace(() -> "Client endpoint was loaded from the " + source + ": " + parsedUri); return parsedUri; } catch (URISyntaxException e) { diff --git a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java index 63bbcaa6cdb0..4a71ba7681fb 100644 --- a/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java +++ b/core/aws-core/src/main/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilder.java @@ -21,6 +21,8 @@ import static software.amazon.awssdk.core.interceptor.SdkExecutionAttribute.RESOLVED_CHECKSUM_SPECS; import static software.amazon.awssdk.core.internal.useragent.BusinessMetricsUtils.resolveRetryMode; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Optional; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -36,6 +38,8 @@ import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.client.config.SdkClientOption; @@ -49,8 +53,11 @@ import software.amazon.awssdk.core.internal.InternalCoreExecutionAttribute; import software.amazon.awssdk.core.internal.util.HttpChecksumResolver; import 
software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.endpoints.EndpointProvider; +import software.amazon.awssdk.http.ContentStreamProvider; import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeProvider; @@ -69,7 +76,7 @@ private AwsExecutionContextBuilder() { */ public static ExecutionContext invokeInterceptorsAndCreateExecutionContext(ClientExecutionParams executionParams, - SdkClientConfiguration clientConfig) { + SdkClientConfiguration clientConfig) { // Note: This is currently copied to DefaultS3Presigner and other presigners. // Don't edit this without considering those @@ -134,13 +141,13 @@ private AwsExecutionContextBuilder() { putAuthSchemeResolutionAttributes(executionAttributes, clientConfig, originalRequest); ExecutionInterceptorChain executionInterceptorChain = - new ExecutionInterceptorChain(clientConfig.option(SdkClientOption.EXECUTION_INTERCEPTORS)); + new ExecutionInterceptorChain(clientConfig.option(SdkClientOption.EXECUTION_INTERCEPTORS)); InterceptorContext interceptorContext = InterceptorContext.builder() - .request(originalRequest) - .asyncRequestBody(executionParams.getAsyncRequestBody()) - .requestBody(executionParams.getRequestBody()) - .build(); + .request(originalRequest) + .asyncRequestBody(executionParams.getAsyncRequestBody()) + .requestBody(executionParams.getRequestBody()) + .build(); interceptorContext = runInitialInterceptors(interceptorContext, executionAttributes, executionInterceptorChain); SdkRequest modifiedRequests = interceptorContext.request(); @@ -159,6 +166,8 @@ private AwsExecutionContextBuilder() { signer, executionAttributes, executionAttributes.getOptionalAttribute( 
AwsSignerExecutionAttribute.AWS_CREDENTIALS).orElse(null))); + putStreamingInputOutputTypesMetadata(executionAttributes, executionParams); + return ExecutionContext.builder() .interceptorChain(executionInterceptorChain) .interceptorContext(interceptorContext) @@ -168,6 +177,57 @@ private AwsExecutionContextBuilder() { .build(); } + private static void putStreamingInputOutputTypesMetadata( + ExecutionAttributes executionAttributes, ClientExecutionParams executionParams) { + List userAgentMetadata = new ArrayList<>(); + + if (executionParams.getRequestBody() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rb") + .value(ContentStreamProvider.ProviderType.shortValueFromName( + executionParams.getRequestBody().contentStreamProvider().name()) + ) + .build()); + } + + if (executionParams.getAsyncRequestBody() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rb") + .value(AsyncRequestBody.BodyType.shortValueFromName( + executionParams.getAsyncRequestBody().body()) + ) + .build()); + } + + if (executionParams.getResponseTransformer() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rt") + .value(ResponseTransformer.TransformerType.shortValueFromName( + executionParams.getResponseTransformer().name()) + ) + .build()); + } + + if (executionParams.getAsyncResponseTransformer() != null) { + userAgentMetadata.add( + AdditionalMetadata + .builder() + .name("rt") + .value(AsyncResponseTransformer.TransformerType.shortValueFromName( + executionParams.getAsyncResponseTransformer().name()) + ) + .build()); + } + + executionAttributes.putAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA, userAgentMetadata); + } + /** * We will load the old (non-SRA) signer if this client seems like an old version or the customer has provided a signer * override. We assume that if there's no auth schemes defined, we're on the old code path. 
@@ -217,9 +277,6 @@ private static void putAuthSchemeResolutionAttributes(ExecutionAttributes execut .putAttribute(SdkInternalExecutionAttribute.IDENTITY_PROVIDERS, identityProviders); } - // TODO(sra-identity-and-auth): This is hard coding the logic for the credentialsIdentityProvider from - // AwsRequestOverrideConfiguration. Currently, AwsRequestOverrideConfiguration does not support overriding the - // tokenIdentityProvider. When adding that support this method will need to be updated. private static IdentityProviders resolveIdentityProviders(SdkRequest originalRequest, SdkClientConfiguration clientConfig) { IdentityProviders identityProviders = @@ -232,13 +289,17 @@ private static IdentityProviders resolveIdentityProviders(SdkRequest originalReq return null; } - return originalRequest.overrideConfiguration() - .filter(c -> c instanceof AwsRequestOverrideConfiguration) - .map(c -> (AwsRequestOverrideConfiguration) c) - .flatMap(AwsRequestOverrideConfiguration::credentialsIdentityProvider) - .map(identityProvider -> - identityProviders.copy(b -> b.putIdentityProvider(identityProvider))) - .orElse(identityProviders); + return originalRequest + .overrideConfiguration() + .filter(c -> c instanceof AwsRequestOverrideConfiguration) + .map(c -> (AwsRequestOverrideConfiguration) c) + .map(c -> { + return identityProviders.copy(b -> { + c.credentialsIdentityProvider().ifPresent(b::putIdentityProvider); + c.tokenIdentityProvider().ifPresent(b::putIdentityProvider); + }); + }) + .orElse(identityProviders); } /** @@ -277,12 +338,13 @@ private static MetricCollector resolveMetricCollector(ClientExecutionParams tokenIdentityProvider = StaticTokenProvider.create(() -> "test-token"); + + AwsRequestOverrideConfiguration configuration1 = AwsRequestOverrideConfiguration + .builder().tokenIdentityProvider(tokenIdentityProvider).build(); + + assertThat(configuration1.tokenIdentityProvider().get().resolveIdentity().join().token()) + 
.isEqualTo(tokenIdentityProvider.resolveIdentity().join().token()); + } + private void assertCredentialsEqual(AwsCredentialsProvider credentialsProvider, IdentityProvider identityProvider) { AwsCredentials creds1 = credentialsProvider.resolveCredentials(); diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java index 4f2bea548c95..e6ab211de5da 100644 --- a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/AwsExecutionContextBuilderTest.java @@ -22,11 +22,13 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.File; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; import org.junit.Before; @@ -36,6 +38,7 @@ import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; import software.amazon.awssdk.awscore.AwsRequest; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import software.amazon.awssdk.awscore.client.config.AwsClientOption; @@ -43,6 +46,8 @@ import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.SdkResponse; import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.checksums.ChecksumSpecs; import 
software.amazon.awssdk.core.client.config.SdkAdvancedClientOption; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; @@ -58,17 +63,19 @@ import software.amazon.awssdk.core.internal.util.HttpChecksumUtils; import software.amazon.awssdk.core.signer.NoOpSigner; import software.amazon.awssdk.core.signer.Signer; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.http.auth.aws.scheme.AwsV4AuthScheme; import software.amazon.awssdk.http.auth.scheme.NoAuthAuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; import software.amazon.awssdk.http.auth.spi.signer.HttpSigner; -import software.amazon.awssdk.http.auth.spi.signer.SignerProperty; import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; import software.amazon.awssdk.identity.spi.IdentityProvider; import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.identity.spi.TokenIdentity; import software.amazon.awssdk.profiles.ProfileFile; -import software.amazon.awssdk.regions.RegionScope; @RunWith(MockitoJUnitRunner.class) public class AwsExecutionContextBuilderTest { @@ -396,16 +403,24 @@ public void invokeInterceptorsAndCreateExecutionContext_withoutIdentityProviders public void invokeInterceptorsAndCreateExecutionContext_requestOverrideForIdentityProvider_updatesIdentityProviders() { IdentityProvider clientCredentialsProvider = StaticCredentialsProvider.create(AwsBasicCredentials.create("foo", "bar")); + IdentityProvider clientTokenProvider = StaticTokenProvider.create(() -> "client-token"); IdentityProviders identityProviders = - IdentityProviders.builder().putIdentityProvider(clientCredentialsProvider).build(); + IdentityProviders.builder() + .putIdentityProvider(clientCredentialsProvider) + 
.putIdentityProvider(clientTokenProvider) + .build(); SdkClientConfiguration clientConfig = testClientConfiguration() .option(SdkClientOption.IDENTITY_PROVIDERS, identityProviders) .build(); IdentityProvider requestCredentialsProvider = StaticCredentialsProvider.create(AwsBasicCredentials.create("akid", "skid")); + IdentityProvider requestTokenProvider = StaticTokenProvider.create(() -> "request-token"); Optional overrideConfiguration = - Optional.of(AwsRequestOverrideConfiguration.builder().credentialsProvider(requestCredentialsProvider).build()); + Optional.of(AwsRequestOverrideConfiguration.builder() + .credentialsProvider(requestCredentialsProvider) + .tokenIdentityProvider(requestTokenProvider) + .build()); when(sdkRequest.overrideConfiguration()).thenReturn(overrideConfiguration); ClientExecutionParams executionParams = clientExecutionParams(); @@ -420,6 +435,79 @@ public void invokeInterceptorsAndCreateExecutionContext_requestOverrideForIdenti actualIdentityProviders.identityProvider(AwsCredentialsIdentity.class); assertThat(actualIdentityProvider).isSameAs(requestCredentialsProvider); + + IdentityProvider actualTokenProvider = + actualIdentityProviders.identityProvider(TokenIdentity.class); + + assertThat(actualTokenProvider).isSameAs(requestTokenProvider); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withRequestBody_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withRequestBody(RequestBody.fromFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + 
assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rb").value("f").build()) + ); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withResponseTransformer_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withResponseTransformer(ResponseTransformer.toFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rt").value("f").build()) + ); + } + + @Test + public void invokeInterceptorsAndCreateExecutionContext_withAsyncRequestBody_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withAsyncRequestBody(AsyncRequestBody.fromFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rb").value("f").build()) + ); + } + + @Test + public void 
invokeInterceptorsAndCreateExecutionContext_withAsyncResponseTransformer_addsUserAgentMetadata() throws IOException { + ClientExecutionParams executionParams = clientExecutionParams(); + File testFile = File.createTempFile("testFile", UUID.randomUUID().toString()); + testFile.deleteOnExit(); + executionParams.withAsyncResponseTransformer(AsyncResponseTransformer.toFile(testFile)); + + ExecutionContext executionContext = + AwsExecutionContextBuilder.invokeInterceptorsAndCreateExecutionContext(executionParams, + testClientConfiguration().build()); + + ExecutionAttributes executionAttributes = executionContext.executionAttributes(); + assertThat(executionAttributes.getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA)).isEqualTo( + Collections.singletonList(AdditionalMetadata.builder().name("rt").value("f").build()) + ); } private ClientExecutionParams clientExecutionParams() { diff --git a/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java new file mode 100644 index 000000000000..1f8526009cf8 --- /dev/null +++ b/core/aws-core/src/test/java/software/amazon/awssdk/awscore/internal/auth/AuthSchemePreferenceResolverTest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.awscore.internal.auth; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.awscore.auth.AuthSchemePreferenceResolver; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.utils.StringInputStream; + +class AuthSchemePreferenceResolverTest { + + @AfterEach + void tearDown() { + System.clearProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property()); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("profileTestCases") + void profileParsingTests(String testName, String profileContent, String profileName, List expected) { + ProfileFile profileFile = ProfileFile.builder() + .type(ProfileFile.Type.CONFIGURATION) + .content(new StringInputStream(profileContent)) + .build(); + + AuthSchemePreferenceResolver.Builder resolverBuilder = AuthSchemePreferenceResolver.builder() + .profileFile(() -> profileFile); + if (profileName != null) { + resolverBuilder.profileName(profileName); + } + + assertThat(resolverBuilder.build().resolveAuthSchemePreference()).isEqualTo(expected); + } + + static Stream profileTestCases() { + return Stream.of( + Arguments.of( + "Default profile parsing", + "[default]\n" + ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4,bearer", + null, + Arrays.asList("sigv4", "bearer") + ), + Arguments.of( + "Custom profile parsing", + "[profile custom]\n" + ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4,bearer", + "custom", + Arrays.asList("sigv4", "bearer") + ), + Arguments.of( + "Profile with whitespace", + "[default]\n" 
+ ProfileProperty.AUTH_SCHEME_PREFERENCE + "=sigv4, \tbearer \t", + null, + Arrays.asList("sigv4", "bearer") + ) + ); + } + + @ParameterizedTest(name = "{0}") + @MethodSource("systemSettingTestCases") + void systemSettingParsingTests(String testName, String systemSetting, List expected) { + if (systemSetting != null) { + System.setProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property(), systemSetting); + } + + AuthSchemePreferenceResolver resolver = AuthSchemePreferenceResolver.builder().build(); + assertThat(resolver.resolveAuthSchemePreference()).isEqualTo(expected); + } + + static Stream systemSettingTestCases() { + return Stream.of( + Arguments.of("Basic system setting", "sigv4,bearer", Arrays.asList("sigv4", "bearer")), + Arguments.of("Empty system setting", "", Collections.emptyList()), + Arguments.of("No system setting", null, Collections.emptyList()), + + // Whitespace/formatting cases (from schemeParsingCases) + Arguments.of("Whitespace with tabs", "scheme1, scheme2 , \tscheme3 \t", + Arrays.asList("scheme1", "scheme2", "scheme3")), + Arguments.of("Whitespace with joined schemes", "scheme1, scheme2 \t scheme3 scheme4", + Arrays.asList("scheme1", "scheme2scheme3scheme4")), + Arguments.of("Whitespace in scheme names", "sigv4, sig v 4 a, bearer", + Arrays.asList("sigv4", "sigv4a", "bearer")) + ); + } +} \ No newline at end of file diff --git a/core/checksums-spi/pom.xml b/core/checksums-spi/pom.xml index bfa031498318..eace625d6d2c 100644 --- a/core/checksums-spi/pom.xml +++ b/core/checksums-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT checksums-spi diff --git a/core/checksums/pom.xml b/core/checksums/pom.xml index fed6d145488b..70039fa12bef 100644 --- a/core/checksums/pom.xml +++ b/core/checksums/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT checksums diff --git a/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java 
b/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java index a710ed0a0ef4..af539cde94ae 100644 --- a/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java +++ b/core/checksums/src/main/java/software/amazon/awssdk/checksums/internal/ConstructorCache.java @@ -63,7 +63,17 @@ private Optional> getClass(String className) { return Optional.empty(); } }); - return classRef.map(WeakReference::get); + + // if the WeakReference to the class has been garbage collected, remove it from the cache and try again + if (classRef.isPresent()) { + Class clazz = classRef.get().get(); + if (clazz != null) { + return Optional.of(clazz); + } + classesByClassLoader.remove(classLoader); + return getClass(className); + } + return Optional.empty(); } /** diff --git a/core/crt-core/pom.xml b/core/crt-core/pom.xml index e6c368e62dbe..ca963dc141d3 100644 --- a/core/crt-core/pom.xml +++ b/core/crt-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT crt-core diff --git a/core/endpoints-spi/pom.xml b/core/endpoints-spi/pom.xml index 326345a58921..4f13ee5ef369 100644 --- a/core/endpoints-spi/pom.xml +++ b/core/endpoints-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/http-auth-aws-crt/pom.xml b/core/http-auth-aws-crt/pom.xml index 59312a96f12a..fe69f58f0866 100644 --- a/core/http-auth-aws-crt/pom.xml +++ b/core/http-auth-aws-crt/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-auth-aws-crt diff --git a/core/http-auth-aws-eventstream/pom.xml b/core/http-auth-aws-eventstream/pom.xml index 2f1545c43889..ef39bbc4b083 100644 --- a/core/http-auth-aws-eventstream/pom.xml +++ b/core/http-auth-aws-eventstream/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-auth-aws-eventstream diff --git a/core/http-auth-aws/pom.xml 
b/core/http-auth-aws/pom.xml index a380082e6577..9ff1f9f3361a 100644 --- a/core/http-auth-aws/pom.xml +++ b/core/http-auth-aws/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-auth-aws diff --git a/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/crt/internal/util/CrtHttpRequestConverter.java b/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/crt/internal/util/CrtHttpRequestConverter.java index 0485cf128887..fd0bb010fd8f 100644 --- a/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/crt/internal/util/CrtHttpRequestConverter.java +++ b/core/http-auth-aws/src/main/java/software/amazon/awssdk/http/auth/aws/crt/internal/util/CrtHttpRequestConverter.java @@ -31,6 +31,7 @@ import software.amazon.awssdk.http.auth.aws.crt.internal.io.CrtInputStream; import software.amazon.awssdk.utils.StringUtils; import software.amazon.awssdk.utils.http.SdkHttpUtils; +import software.amazon.awssdk.utils.uri.SdkUri; @SdkInternalApi public final class CrtHttpRequestConverter { @@ -73,7 +74,7 @@ public static SdkHttpRequest toRequest(SdkHttpRequest request, HttpRequest crtRe String portString = SdkHttpUtils.isUsingStandardPort(builder.protocol(), builder.port()) ? 
"" : ":" + builder.port(); String encodedPath = encodedPathFromCrtFormat(request.encodedPath(), crtRequest.getEncodedPath()); String fullUriString = builder.protocol() + "://" + builder.host() + portString + encodedPath; - fullUri = new URI(fullUriString); + fullUri = SdkUri.getInstance().newUri(fullUriString); } catch (URISyntaxException e) { throw new RuntimeException("Full URI could not be formed.", e); } diff --git a/core/http-auth-spi/pom.xml b/core/http-auth-spi/pom.xml index 33a5729692af..c06eb454884b 100644 --- a/core/http-auth-spi/pom.xml +++ b/core/http-auth-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-auth-spi diff --git a/core/http-auth/pom.xml b/core/http-auth/pom.xml index 374fafabb3ab..49e750e4a6f0 100644 --- a/core/http-auth/pom.xml +++ b/core/http-auth/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-auth diff --git a/core/identity-spi/pom.xml b/core/identity-spi/pom.xml index da05e74796da..e7878b61f01f 100644 --- a/core/identity-spi/pom.xml +++ b/core/identity-spi/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT identity-spi diff --git a/core/imds/pom.xml b/core/imds/pom.xml index e143ac597126..a4e939a1eb63 100644 --- a/core/imds/pom.xml +++ b/core/imds/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 imds diff --git a/core/json-utils/pom.xml b/core/json-utils/pom.xml index 3bed94d41173..b14a3c2614b1 100644 --- a/core/json-utils/pom.xml +++ b/core/json-utils/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/metrics-spi/pom.xml b/core/metrics-spi/pom.xml index 5660cbf23c98..aac0fe42074b 100644 --- a/core/metrics-spi/pom.xml +++ b/core/metrics-spi/pom.xml @@ -5,7 +5,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/pom.xml b/core/pom.xml index 
ddd5565a98bb..9008ee4b6e9e 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT core diff --git a/core/profiles/pom.xml b/core/profiles/pom.xml index 13b352ebc4e3..8cbaa2100f01 100644 --- a/core/profiles/pom.xml +++ b/core/profiles/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT profiles diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java index 89ffa2b64ad2..5199e729da8f 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFile.java @@ -85,6 +85,13 @@ public static Aggregator aggregator() { return new Aggregator(); } + /** + * Create an empty profile file. + */ + static ProfileFile empty() { + return new ProfileFile(Collections.emptyMap()); + } + /** * Get the default profile file, using the credentials file from "~/.aws/credentials", the config file from "~/.aws/config" * and the "default" profile. This default behavior can be customized using the @@ -310,8 +317,10 @@ public void setType(Type type) { @Override public ProfileFile build() { + Validate.isTrue(content != null || contentLocation != null, + "content or contentLocation must be set."); InputStream stream = content != null ? 
content : - FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentLocation)); + FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentLocation)); Validate.paramNotNull(type, "type"); Validate.paramNotNull(stream, "content"); diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java index 4dec2883c814..7046c300d339 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileFileSupplier.java @@ -55,13 +55,15 @@ static ProfileFileSupplier defaultSupplier() { = ProfileFileLocation.configurationFileLocation() .map(path -> reloadWhenModified(path, ProfileFile.Type.CONFIGURATION)); - ProfileFileSupplier supplier = () -> ProfileFile.builder().build(); + ProfileFileSupplier supplier; if (credentialsSupplierOptional.isPresent() && configurationSupplierOptional.isPresent()) { supplier = aggregate(credentialsSupplierOptional.get(), configurationSupplierOptional.get()); } else if (credentialsSupplierOptional.isPresent()) { supplier = credentialsSupplierOptional.get(); } else if (configurationSupplierOptional.isPresent()) { supplier = configurationSupplierOptional.get(); + } else { + supplier = fixedProfileFile(ProfileFile.empty()); } return supplier; diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java index 434e27b3b6f2..cd97c6047a55 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/ProfileProperty.java @@ -151,6 +151,8 @@ public final class ProfileProperty { public static final String USE_DUALSTACK_ENDPOINT = "use_dualstack_endpoint"; + public static final String AUTH_SCHEME_PREFERENCE = 
"auth_scheme_preference"; + public static final String USE_FIPS_ENDPOINT = "use_fips_endpoint"; public static final String EC2_METADATA_SERVICE_ENDPOINT_MODE = "ec2_metadata_service_endpoint_mode"; diff --git a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java index 799aa5880882..60a91a25527f 100644 --- a/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java +++ b/core/profiles/src/main/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresher.java @@ -39,6 +39,7 @@ public final class ProfileFileRefresher { private static final ProfileFileRefreshRecord EMPTY_REFRESH_RECORD = ProfileFileRefreshRecord.builder() .refreshTime(Instant.MIN) .build(); + private static final long STALE_TIME_MS = 1000; private final CachedSupplier profileFileCache; private volatile ProfileFileRefreshRecord currentRefreshRecord; private final Supplier profileFile; @@ -96,7 +97,7 @@ private RefreshResult reloadAsRefreshResultIfStale() { refreshRecord = currentRefreshRecord; } - return wrapIntoRefreshResult(refreshRecord, now); + return wrapIntoRefreshResult(refreshRecord, now.plusMillis(STALE_TIME_MS)); } private RefreshResult wrapIntoRefreshResult(T value, Instant staleTime) { diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java index bc39916da5d9..e5ca165840ae 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileSupplierTest.java @@ -48,6 +48,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.condition.EnabledForJreRange; import org.junit.jupiter.api.condition.JRE; +import 
software.amazon.awssdk.testutils.EnvironmentVariableHelper; import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringInputStream; import software.amazon.awssdk.testutils.LogCaptor; @@ -581,6 +582,15 @@ public void checkPermission(Permission perm) { } } + @Test + public void defaultSupplier_noCredentialsFiles_returnsEmptyProvider() { + EnvironmentVariableHelper.run(environmentVariableHelper -> { + environmentVariableHelper.set(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE, "no-such-file"); + environmentVariableHelper.set(ProfileFileSystemSetting.AWS_CONFIG_FILE, "no-such-file"); + ProfileFileSupplier supplier = ProfileFileSupplier.defaultSupplier(); + assertThat(supplier.get().profiles()).isEmpty(); + }); + } private Path writeTestFile(String contents, Path path) { try { diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java index e827e07dbd8b..9d2310c69d2b 100644 --- a/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/ProfileFileTest.java @@ -570,6 +570,14 @@ public void returnsEmptyMap_when_AwsFilesDoNotExist() { assertThat(missingProfile.profiles()).isInstanceOf(Map.class); } + @Test + public void builderValidatesContentRequired() { + assertThatThrownBy(() -> ProfileFile.builder().type(ProfileFile.Type.CONFIGURATION).build()) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("content or contentLocation must be set."); + + } + private ProfileFile configFile(String configFile) { return ProfileFile.builder() .content(configFile) diff --git a/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java b/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java index 69e86f937484..97bf3e9aa707 100644 --- 
a/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java +++ b/core/profiles/src/test/java/software/amazon/awssdk/profiles/internal/ProfileFileRefresherTest.java @@ -29,11 +29,13 @@ import java.time.ZoneOffset; import java.time.temporal.TemporalAmount; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.logging.log4j.Level; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.testutils.LogCaptor; public class ProfileFileRefresherTest { @@ -63,43 +65,47 @@ void refreshIfStale_profileModifiedNoPathSpecified_doesNotReloadProfileFile() { ProfileFileRefresher refresher = refresherWithClock(clock) .profileFile(() -> profileFile(credentialsFilePath)) .build(); - Duration intervalWithinJitter = Duration.ofMillis(100); + Duration intervalWithinStale = Duration.ofMillis(100); ProfileFile file1 = refresher.refreshIfStale(); generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); updateModificationTime(credentialsFilePath, clock.instant().plusMillis(1)); - clock.tickForward(intervalWithinJitter); + clock.tickForward(intervalWithinStale); ProfileFile file2 = refresher.refreshIfStale(); Assertions.assertThat(file2).isSameAs(file1); } @Test - void refreshIfStale_profileModifiedWithinJitterPeriod_doesNotReloadProfileFile() { - Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); + void refreshIfStale_profileModifiedWithinStalePeriod_doesNotReloadProfileFile() { + try (LogCaptor logCaptor = LogCaptor.create(Level.WARN)) { + Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); - AdjustableClock clock = new AdjustableClock(); - ProfileFileRefresher refresher = refresherWithClock(clock) - .profileFile(() -> 
profileFile(credentialsFilePath)) - .profileFilePath(credentialsFilePath) - .build(); - Duration intervalWithinJitter = Duration.ofMillis(100); + AdjustableClock clock = new AdjustableClock(); + ProfileFileRefresher refresher = refresherWithClock(clock) + .profileFile(() -> profileFile(credentialsFilePath)) + .profileFilePath(credentialsFilePath) + .build(); + Duration intervalWithinStale = Duration.ofMillis(100); - ProfileFile file1 = refresher.refreshIfStale(); + ProfileFile file1 = refresher.refreshIfStale(); - clock.tickForward(intervalWithinJitter); - generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); - updateModificationTime(credentialsFilePath, clock.instant()); + clock.tickForward(intervalWithinStale); + generateTestCredentialsFile("modifiedAccessKey", "modifiedSecretAccessKey"); + updateModificationTime(credentialsFilePath, clock.instant()); - ProfileFile file2 = refresher.refreshIfStale(); + ProfileFile file2 = refresher.refreshIfStale(); - Assertions.assertThat(file2).isSameAs(file1); + Assertions.assertThat(file2).isSameAs(file1); + + Assertions.assertThat(logCaptor.loggedEvents()).isEmpty(); + } } @Test - void refreshIfStale_profileModifiedOutsideJitterPeriod_reloadsProfileFile() { + void refreshIfStale_profileModifiedOutsideStalePeriod_reloadsProfileFile() { Path credentialsFilePath = generateTestCredentialsFile("defaultAccessKey", "defaultSecretAccessKey"); AdjustableClock clock = new AdjustableClock(); diff --git a/core/protocols/aws-cbor-protocol/pom.xml b/core/protocols/aws-cbor-protocol/pom.xml index 8b0178201d60..f235a4b3ee47 100644 --- a/core/protocols/aws-cbor-protocol/pom.xml +++ b/core/protocols/aws-cbor-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/pom.xml b/core/protocols/aws-json-protocol/pom.xml index 6f7362fb070f..00228d3ddd8a 100644 --- a/core/protocols/aws-json-protocol/pom.xml +++ 
b/core/protocols/aws-json-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java index 526d205ca221..5f2f129e8571 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonProtocolUnmarshaller.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.core.SdkBytes; @@ -266,6 +267,11 @@ private T unmarshallFromJson(SdkPojo sdkPojo, InputStream in return (T) unmarshallingParser.parse(sdkPojo, inputStream); } + @SuppressWarnings("unchecked") + private T unmarshallMemberFromJson(Supplier constructor, InputStream inputStream) { + return (T) unmarshallingParser.parseMember(constructor, inputStream); + } + private TypeT unmarshallResponse(SdkPojo sdkPojo, SdkHttpFullResponse response) throws IOException { JsonUnmarshallerContext context = JsonUnmarshallerContext.builder() @@ -290,7 +296,7 @@ private TypeT unmarshallResponse(SdkPojo sdkPojo, } else if (isExplicitPayloadMember(field) && field.marshallingType() == MarshallingType.SDK_POJO) { Optional responseContent = context.response().content(); if (responseContent.isPresent()) { - field.set(sdkPojo, unmarshallFromJson(field.constructor().get(), responseContent.get())); + field.set(sdkPojo, unmarshallMemberFromJson(field.constructor(), responseContent.get())); } else { 
field.set(sdkPojo, null); } diff --git a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallingParser.java b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallingParser.java index 30737ae78d73..77e6231dd2a5 100644 --- a/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallingParser.java +++ b/core/protocols/aws-json-protocol/src/main/java/software/amazon/awssdk/protocols/json/internal/unmarshall/JsonUnmarshallingParser.java @@ -24,6 +24,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.annotations.ThreadSafe; import software.amazon.awssdk.core.SdkBytes; @@ -72,6 +73,33 @@ public static Builder builder() { return new Builder(); } + /** + * Parse the provided {@link InputStream} and return the deserialized {@link SdkPojo}. Unlike + * {@link #parse(SdkPojo, InputStream)} this method returns null if the input stream is empty. This is used to unmarshall + * payload members that can be null unlike top-level response pojos. 
+ */ + public SdkPojo parseMember(Supplier constructor, InputStream content) { + return invokeSafely(() -> { + try (JsonParser parser = jsonFactory.createParser(content) + .configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false)) { + + JsonUnmarshallerContext c = JsonUnmarshallerContext.builder().build(); + JsonToken token = parser.nextToken(); + if (token == null) { + return null; + } + if (token == JsonToken.VALUE_NULL) { + return null; + } + if (token != JsonToken.START_OBJECT) { + throw new JsonParseException("expecting start object, got instead: " + token); + } + SdkPojo pojo = constructor.get(); + return parseSdkPojo(c, pojo, parser); + } + }); + } + /** * Parse the provided {@link InputStream} and return the deserialized {@link SdkPojo}. */ diff --git a/core/protocols/aws-query-protocol/pom.xml b/core/protocols/aws-query-protocol/pom.xml index 140492037407..9c7698ae662e 100644 --- a/core/protocols/aws-query-protocol/pom.xml +++ b/core/protocols/aws-query-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/protocols/aws-xml-protocol/pom.xml b/core/protocols/aws-xml-protocol/pom.xml index cc7c9098436f..f6bfe51b44e5 100644 --- a/core/protocols/aws-xml-protocol/pom.xml +++ b/core/protocols/aws-xml-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/protocols/pom.xml b/core/protocols/pom.xml index 52b726bb1c06..d57f6804f8bc 100644 --- a/core/protocols/pom.xml +++ b/core/protocols/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/protocols/protocol-core/pom.xml b/core/protocols/protocol-core/pom.xml index a3f0a855470a..77a46c8687e9 100644 --- a/core/protocols/protocol-core/pom.xml +++ b/core/protocols/protocol-core/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git 
a/core/protocols/smithy-rpcv2-protocol/pom.xml b/core/protocols/smithy-rpcv2-protocol/pom.xml index e9387281477e..c1c7ba703f91 100644 --- a/core/protocols/smithy-rpcv2-protocol/pom.xml +++ b/core/protocols/smithy-rpcv2-protocol/pom.xml @@ -20,7 +20,7 @@ protocols software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/regions/pom.xml b/core/regions/pom.xml index fc54fe6a340b..56609cb90d7d 100644 --- a/core/regions/pom.xml +++ b/core/regions/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT regions diff --git a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/ServiceMetadataUtils.java b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/ServiceMetadataUtils.java index 87e1d9f89f37..19810aedfd43 100644 --- a/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/ServiceMetadataUtils.java +++ b/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/ServiceMetadataUtils.java @@ -25,6 +25,7 @@ import software.amazon.awssdk.utils.Pair; import software.amazon.awssdk.utils.StringUtils; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.uri.SdkUri; @SdkInternalApi public class ServiceMetadataUtils { @@ -38,7 +39,8 @@ public static URI endpointFor(String hostname, String endpointPrefix, String region, String dnsSuffix) { - return URI.create(StringUtils.replaceEach(hostname, SEARCH_LIST, new String[] { endpointPrefix, region, dnsSuffix })); + return SdkUri.getInstance().create( + StringUtils.replaceEach(hostname, SEARCH_LIST, new String[] {endpointPrefix, region, dnsSuffix })); } public static Region signingRegion(ServiceEndpointKey key, diff --git a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json index 7a0d5854a3f4..cff345581447 100644 --- 
a/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json +++ b/core/regions/src/main/resources/software/amazon/awssdk/regions/internal/region/endpoints.json @@ -29,6 +29,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -135,6 +138,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "access-analyzer.ap-northeast-1.api.aws", @@ -411,6 +415,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -522,6 +527,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -649,6 +655,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -744,7 +751,9 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -989,6 +998,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -1921,6 +1931,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2043,6 +2054,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2079,6 +2091,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2186,6 +2199,7 @@ "endpoints" 
: { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -2867,12 +2881,15 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "appsync.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { "variants" : [ { "hostname" : "appsync.eu-central-1.api.aws", @@ -2991,6 +3008,8 @@ "protocols" : [ "https" ] }, "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3016,17 +3035,21 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "eu-central-1" : { "variants" : [ { "tags" : [ "dualstack" ] } ] }, + "eu-central-2" : { }, "eu-north-1" : { "variants" : [ { "tags" : [ "dualstack" ] } ] }, + "eu-south-1" : { }, "eu-west-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3042,6 +3065,7 @@ "tags" : [ "dualstack" ] } ] }, + "me-central-1" : { }, "sa-east-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -3089,6 +3113,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3135,6 +3160,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "athena.ap-northeast-1.api.aws", @@ -3456,6 +3482,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3593,6 +3620,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -3660,6 +3688,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -4519,6 +4548,7 @@ 
"tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "cloudcontrolapi.ap-northeast-1.api.aws", @@ -4796,6 +4826,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -4991,6 +5022,7 @@ "tags" : [ "dualstack" ] } ] }, + "eu-south-2" : { }, "eu-west-1" : { "variants" : [ { "hostname" : "cloudhsmv2.eu-west-1.api.aws", @@ -5077,6 +5109,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -5390,6 +5423,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -6473,6 +6507,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -6606,6 +6641,9 @@ "connect-campaigns" : { "endpoints" : { "af-south-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, @@ -7156,6 +7194,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "datasync.ap-northeast-1.api.aws", @@ -7426,6 +7465,7 @@ } ] }, "endpoints" : { + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "datazone.ap-northeast-1.api.aws" }, @@ -7639,6 +7679,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -7768,6 +7809,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "dlm.ap-northeast-1.api.aws", @@ -7972,6 +8014,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { 
}, @@ -8352,6 +8395,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8467,6 +8511,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8588,6 +8633,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "ec2.ap-northeast-1.api.aws", @@ -8776,6 +8822,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8877,6 +8924,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -8973,6 +9021,7 @@ "ap-east-1" : { "hostname" : "eks-auth.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "eks-auth.ap-northeast-1.api.aws" }, @@ -9069,6 +9118,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -9364,6 +9414,7 @@ "tags" : [ "fips" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", @@ -9777,6 +9828,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -9865,6 +9917,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -9994,6 +10047,7 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, @@ -10004,6 +10058,7 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { 
}, "eu-west-1" : { }, @@ -10045,6 +10100,7 @@ "hostname" : "email-fips.us-west-2.amazonaws.com" }, "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -10282,6 +10338,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "aos.ap-northeast-1.api.aws", @@ -10525,6 +10582,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "events.ap-northeast-1.api.aws", @@ -10827,6 +10885,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "firehose.ap-northeast-1.api.aws", @@ -11459,6 +11518,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "fsx-fips.ca-central-1.amazonaws.com", @@ -11566,6 +11626,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "prod-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -11663,6 +11724,8 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, @@ -11694,6 +11757,7 @@ "ap-east-1" : { "hostname" : "gameliftstreams.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "gameliftstreams.ap-northeast-1.api.aws" }, @@ -11918,6 +11982,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "glue.ap-northeast-1.api.aws", @@ -12527,6 +12592,7 @@ "endpoints" : { "ap-south-1" : { }, "ap-southeast-2" : { }, + "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -12608,6 +12674,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -12621,6 
+12688,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -12777,14 +12845,20 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -12816,7 +12890,10 @@ "deprecated" : true, "hostname" : "inspector2-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -12868,6 +12945,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "internetmonitor.ap-northeast-1.api.aws", "variants" : [ { @@ -13789,6 +13867,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -13854,6 +13933,7 @@ "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, + "mx-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -13979,6 +14059,7 @@ "ap-east-1" : { "hostname" : "kendra-ranking.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "kendra-ranking.ap-northeast-1.api.aws" }, @@ -14085,6 +14166,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -14328,6 +14410,7 @@ "deprecated" : true, "hostname" : "kms-fips.ap-east-1.amazonaws.com" }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "kms-fips.ap-northeast-1.amazonaws.com", @@ 
-14734,6 +14817,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "lakeformation.ap-northeast-1.api.aws", @@ -14982,6 +15066,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "lambda.ap-northeast-1.api.aws", @@ -15488,6 +15573,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "logs.ap-northeast-1.api.aws", @@ -16755,6 +16841,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -17044,6 +17131,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -17470,6 +17558,7 @@ "ap-east-1" : { "hostname" : "notifications.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "notifications.ap-northeast-1.api.aws" }, @@ -17578,6 +17667,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -18079,6 +18169,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "protocols" : [ "https" ], "variants" : [ { @@ -18869,13 +18960,6 @@ } } }, - "private-networks" : { - "endpoints" : { - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } - } - }, "profile" : { "endpoints" : { "af-south-1" : { }, @@ -18957,6 +19041,7 @@ "ap-east-1" : { "hostname" : "qbusiness.ap-east-1.api.aws" }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "qbusiness.ap-northeast-1.api.aws" }, @@ -19160,6 +19245,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -19404,6 +19490,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : 
"rbin.ap-northeast-1.api.aws", @@ -19658,6 +19745,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -19935,6 +20023,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -20626,6 +20715,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -20721,6 +20811,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -20907,6 +20998,8 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { }, + "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "route53profiles-fips.ca-central-1.api.aws", @@ -20991,6 +21084,7 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { }, "sa-east-1" : { "variants" : [ { "hostname" : "route53profiles.sa-east-1.api.aws", @@ -21052,6 +21146,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "route53resolver.ap-northeast-1.api.aws", @@ -21521,6 +21616,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "hostname" : "s3.ap-northeast-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ], @@ -22418,6 +22514,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -22426,6 +22523,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -22494,6 +22592,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "tags" : [ "dualstack" ] @@ -23308,6 +23407,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { 
}, "ap-northeast-1" : { "variants" : [ { "hostname" : "servicediscovery.ap-northeast-1.api.aws", @@ -23575,6 +23675,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -24639,6 +24740,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "sns.ap-northeast-1.api.aws", @@ -24889,6 +24991,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "sqs.ap-northeast-1.api.aws", @@ -25136,6 +25239,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -25828,6 +25932,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -25936,6 +26041,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26050,6 +26156,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26093,6 +26200,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26202,6 +26310,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -26320,6 +26429,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-east-2" : { }, "ap-northeast-1" : { "variants" : [ { "hostname" : "synthetics.ap-northeast-1.api.aws", @@ -26584,6 +26694,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -28917,6 +29028,7 @@ "endpoints" : { 
"af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -30431,6 +30543,12 @@ }, "isRegionalized" : true }, + "scheduler" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "schemas" : { "endpoints" : { "cn-north-1" : { }, @@ -35232,6 +35350,12 @@ } } }, + "scheduler" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "schemas" : { "endpoints" : { "us-gov-east-1" : { }, @@ -36551,6 +36675,12 @@ "us-iso-west-1" : { } } }, + "backup" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "batch" : { "endpoints" : { "us-iso-east-1" : { } @@ -37109,6 +37239,11 @@ } } }, + "lakeformation" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "lambda" : { "endpoints" : { "us-iso-east-1" : { }, @@ -37785,6 +37920,11 @@ "us-isob-east-1" : { } } }, + "athena" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "autoscaling" : { "defaults" : { "protocols" : [ "http", "https" ] @@ -37793,6 +37933,11 @@ "us-isob-east-1" : { } } }, + "backup" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "batch" : { "endpoints" : { "us-isob-east-1" : { } @@ -38132,6 +38277,11 @@ } } }, + "lakeformation" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "lambda" : { "endpoints" : { "us-isob-east-1" : { } @@ -38385,6 +38535,11 @@ } } }, + "securityhub" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "servicediscovery" : { "endpoints" : { "us-isob-east-1" : { } @@ -38873,6 +39028,11 @@ } } }, + "license-manager" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, "logs" : { "endpoints" : { "eu-isoe-west-1" : { } @@ -38954,6 +39114,11 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-iso-e-global" }, + "route53profiles" : { + "endpoints" : { + "eu-isoe-west-1" : { } + } + }, "route53resolver" : { "endpoints" : { "eu-isoe-west-1" : { } @@ -38985,6 +39150,11 @@ "eu-isoe-west-1" : { } } }, + "schemas" : { 
+ "endpoints" : { + "eu-isoe-west-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "eu-isoe-west-1" : { } @@ -39261,6 +39431,12 @@ "us-isof-south-1" : { } } }, + "cloudtrail-data" : { + "endpoints" : { + "us-isof-east-1" : { }, + "us-isof-south-1" : { } + } + }, "codebuild" : { "endpoints" : { "us-isof-east-1" : { }, diff --git a/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java b/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java index 49f057b746c2..d6d5a94deb97 100644 --- a/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java +++ b/core/regions/src/test/java/software/amazon/awssdk/regions/RegionTest.java @@ -66,4 +66,33 @@ public void idIsUrlEncoded() { Region region = Region.of("http://my-host.com/?"); assertThat(region.id()).isEqualTo("http%3A%2F%2Fmy-host.com%2F%3F"); } + + @Test + public void globalRegionIsRecognized() { + Region globalRegion = Region.of("aws-global"); + assertThat(globalRegion.id()).isEqualTo("aws-global"); + assertSame(Region.AWS_GLOBAL, globalRegion); + } + + @Test + public void multipleGlobalRegionsAreSupported() { + Region awsGlobal = Region.of("aws-global"); + Region s3Global = Region.of("s3-global"); + + assertThat(awsGlobal.id()).isEqualTo("aws-global"); + assertThat(s3Global.id()).isEqualTo("s3-global"); + + assertSame(Region.of("aws-global"), awsGlobal); + assertSame(Region.of("s3-global"), s3Global); + } + + @Test + public void allPartitionGlobalRegionsAreRecognized() { + assertThat(Region.of("aws-global").id()).isEqualTo("aws-global"); + assertThat(Region.of("aws-cn-global").id()).isEqualTo("aws-cn-global"); + assertThat(Region.of("aws-us-gov-global").id()).isEqualTo("aws-us-gov-global"); + assertThat(Region.of("aws-iso-global").id()).isEqualTo("aws-iso-global"); + assertThat(Region.of("aws-iso-b-global").id()).isEqualTo("aws-iso-b-global"); + assertThat(Region.of("aws-iso-f-global").id()).isEqualTo("aws-iso-f-global"); + } } diff --git 
a/core/retries-spi/pom.xml b/core/retries-spi/pom.xml index 1c038080f107..bf5757827e1b 100644 --- a/core/retries-spi/pom.xml +++ b/core/retries-spi/pom.xml @@ -20,7 +20,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/retries/pom.xml b/core/retries/pom.xml index ca4b45253654..8cff2e0a512d 100644 --- a/core/retries/pom.xml +++ b/core/retries/pom.xml @@ -21,7 +21,7 @@ core software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/core/sdk-core/pom.xml b/core/sdk-core/pom.xml index a7229c37668a..774a048de19e 100644 --- a/core/sdk-core/pom.xml +++ b/core/sdk-core/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk core - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sdk-core AWS Java SDK :: SDK Core diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java index f55eb73cbc7f..65889c2d08fd 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/SdkSystemSetting.java @@ -256,8 +256,14 @@ public enum SdkSystemSetting implements SystemSetting { * Configure the SIGV4A signing region set. * This is a non-empty, comma-delimited list of AWS region names used during signing. */ - AWS_SIGV4A_SIGNING_REGION_SET("aws.sigv4a.signing.region.set", null) - ; + AWS_SIGV4A_SIGNING_REGION_SET("aws.sigv4a.signing.region.set", null), + + + /** + * Configure the preferred auth scheme to use. + * This is a comma-delimited list of AWS auth scheme names used during signing. 
+ */ + AWS_AUTH_SCHEME_PREFERENCE("aws.authSchemePreference", null); private final String systemProperty; private final String defaultValue; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java index 752e0032958f..3fd8c3cc0165 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncRequestBody.java @@ -23,11 +23,13 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.Arrays; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.function.Consumer; import org.reactivestreams.Publisher; import org.reactivestreams.Subscriber; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.FileRequestBodyConfiguration; import software.amazon.awssdk.core.internal.async.ByteBuffersAsyncRequestBody; @@ -37,6 +39,7 @@ import software.amazon.awssdk.core.internal.util.Mimetype; import software.amazon.awssdk.utils.BinaryUtils; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Interface to allow non-blocking streaming of request content. This follows the reactive streams pattern where this interface is @@ -74,6 +77,16 @@ default String contentType() { return Mimetype.MIMETYPE_OCTET_STREAM; } + /** + * Each AsyncRequestBody should return a well-formed name that can be used to identify the implementation. + * The body name should only include alphanumeric characters. + * + * @return String containing the identifying name of this AsyncRequestBody implementation. 
+ */ + default String body() { + return BodyType.UNKNOWN.getName(); + } + /** * Creates an {@link AsyncRequestBody} the produces data from the input ByteBuffer publisher. The data is delivered when the * publisher publishes the data. @@ -96,6 +109,11 @@ public Optional contentLength() { public void subscribe(Subscriber s) { publisher.subscribe(s); } + + @Override + public String body() { + return BodyType.PUBLISHER.getName(); + } }; } @@ -403,8 +421,8 @@ static AsyncRequestBody fromInputStream(Consumer split(Consumer VALUE_MAP = + EnumUtils.uniqueIndex(BodyType.class, BodyType::getName); + + private final String name; + private final String shortValue; + + BodyType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java index 6550497d52ed..d7c872d89289 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/AsyncResponseTransformer.java @@ -19,8 +19,10 @@ import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.file.Path; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.FileTransformerConfiguration; import software.amazon.awssdk.core.ResponseBytes; @@ -36,6 +38,7 @@ import software.amazon.awssdk.utils.Validate; import 
software.amazon.awssdk.utils.builder.CopyableBuilder; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Callback interface to handle a streaming asynchronous response. @@ -158,6 +161,16 @@ default SplitResult split(Consumer Builder resultFuture(CompletableFuture future); } } + + @SdkProtectedApi + enum TransformerType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STREAM("Stream", "s"), + PUBLISHER("Publisher", "p"), + UNKNOWN("Unknown", "u"); + + private static final Map VALUE_MAP = + EnumUtils.uniqueIndex(TransformerType.class, TransformerType::getName); + + private final String name; + private final String shortValue; + + TransformerType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java index 3639d82c04c1..deb354d276dd 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingInputStreamAsyncRequestBody.java @@ -120,6 +120,11 @@ public void subscribe(Subscriber s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void waitForSubscriptionIfNeeded() throws InterruptedException { long timeoutSeconds = subscribeTimeout.getSeconds(); if (!subscribedLatch.await(timeoutSeconds, TimeUnit.SECONDS)) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java index c0a044ffa3ba..f6bc15844729 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/BlockingOutputStreamAsyncRequestBody.java @@ -92,6 +92,11 @@ public void subscribe(Subscriber s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void waitForSubscriptionIfNeeded() { try { long timeoutSeconds = subscribeTimeout.getSeconds(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java index b189d51ec7ef..c7ee37690ca0 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/async/listener/AsyncResponseTransformerListener.java @@ -76,6 +76,10 @@ final class NotifyingAsyncResponseTransformer implements Asy this.listener = Validate.notNull(listener, "listener"); } + public AsyncResponseTransformer getDelegate() { + return delegate; + } + @Override public CompletableFuture prepare() { return delegate.prepare(); @@ -99,6 +103,11 @@ public void exceptionOccurred(Throwable error) { delegate.exceptionOccurred(error); } + @Override + public String name() { + return delegate.name(); + } + static void invoke(Runnable runnable, String callbackName) { try { runnable.run(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java index 3edec111e45a..ad4e7bcbdec4 100644 --- 
a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/builder/SdkDefaultClientBuilder.java @@ -21,6 +21,7 @@ import static software.amazon.awssdk.core.client.config.SdkAdvancedClientOption.USER_AGENT_PREFIX; import static software.amazon.awssdk.core.client.config.SdkAdvancedClientOption.USER_AGENT_SUFFIX; import static software.amazon.awssdk.core.client.config.SdkClientOption.ADDITIONAL_HTTP_HEADERS; +import static software.amazon.awssdk.core.client.config.SdkClientOption.API_METADATA; import static software.amazon.awssdk.core.client.config.SdkClientOption.ASYNC_HTTP_CLIENT; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_TYPE; import static software.amazon.awssdk.core.client.config.SdkClientOption.CLIENT_USER_AGENT; @@ -93,6 +94,7 @@ import software.amazon.awssdk.core.internal.useragent.AppIdResolver; import software.amazon.awssdk.core.internal.useragent.SdkClientUserAgentProperties; import software.amazon.awssdk.core.internal.useragent.SdkUserAgentBuilder; +import software.amazon.awssdk.core.internal.useragent.UserAgentConstant; import software.amazon.awssdk.core.retry.RetryMode; import software.amazon.awssdk.core.util.SystemUserAgent; import software.amazon.awssdk.http.ExecutableHttpRequest; @@ -402,6 +404,7 @@ private String resolveClientUserAgent(LazyValueSource config) { String appId = config.get(USER_AGENT_APP_ID); String resolvedAppId = appId == null ? 
resolveAppId(config) : appId; clientProperties.putProperty(APP_ID, resolvedAppId); + clientProperties.putProperty(UserAgentConstant.API_METADATA, config.get(API_METADATA)); return SdkUserAgentBuilder.buildClientUserAgentString(SystemUserAgent.getOrCreate(), clientProperties); } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java index 58def071bc64..e05753513ad4 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/config/SdkClientOption.java @@ -342,6 +342,11 @@ public final class SdkClientOption extends ClientOption { public static final SdkClientOption RESPONSE_CHECKSUM_VALIDATION = new SdkClientOption<>(ResponseChecksumValidation.class); + /** + * The API metadata for user agent (service-id#version). + */ + public static final SdkClientOption API_METADATA = new SdkClientOption<>(String.class); + /** * An optional identification value to be appended to the user agent header. The value should be less than 50 characters in * length and is null by default. 
diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java index 81e9d4209ed8..e307f5857ce7 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/client/handler/ClientExecutionParams.java @@ -23,6 +23,7 @@ import software.amazon.awssdk.core.SdkProtocolMetadata; import software.amazon.awssdk.core.SdkRequest; import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.client.config.SdkClientConfiguration; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.http.HttpResponseHandler; @@ -30,6 +31,7 @@ import software.amazon.awssdk.core.interceptor.ExecutionAttributes; import software.amazon.awssdk.core.runtime.transform.Marshaller; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.metrics.MetricCollector; /** @@ -49,6 +51,8 @@ public final class ClientExecutionParams { private HttpResponseHandler responseHandler; private HttpResponseHandler errorResponseHandler; private HttpResponseHandler> combinedResponseHandler; + private ResponseTransformer responseTransformer; + private AsyncResponseTransformer asyncResponseTransformer; private boolean fullDuplex; private boolean hasInitialRequestEvent; private String hostPrefixExpression; @@ -133,6 +137,25 @@ public ClientExecutionParams withAsyncRequestBody(AsyncRequestB return this; } + public ResponseTransformer getResponseTransformer() { + return responseTransformer; + } + + public ClientExecutionParams withResponseTransformer(ResponseTransformer responseTransformer) { + this.responseTransformer = responseTransformer; + 
return this; + } + + public AsyncResponseTransformer getAsyncResponseTransformer() { + return asyncResponseTransformer; + } + + public ClientExecutionParams withAsyncResponseTransformer( + AsyncResponseTransformer asyncResponseTransformer) { + this.asyncResponseTransformer = asyncResponseTransformer; + return this; + } + public boolean isFullDuplex() { return fullDuplex; } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java index 08f890bd8333..bd09c48d9f43 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/interceptor/SdkInternalExecutionAttribute.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.interceptor; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import software.amazon.awssdk.annotations.SdkProtectedApi; @@ -28,6 +29,7 @@ import software.amazon.awssdk.core.interceptor.trait.HttpChecksum; import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired; import software.amazon.awssdk.core.internal.interceptor.trait.RequestCompression; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.endpoints.Endpoint; import software.amazon.awssdk.endpoints.EndpointProvider; @@ -55,6 +57,12 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute BUSINESS_METRICS = new ExecutionAttribute<>("BusinessMetricsCollection"); + /** + * A collection of metadata to be added to the UserAgent. 
+ */ + public static final ExecutionAttribute> USER_AGENT_METADATA = + new ExecutionAttribute<>("UserAgentMetadata"); + /** * If true, indicates that this is an event streaming request being sent over RPC, and therefore the serialized * request object is encapsulated as an event of type {@code initial-request}. @@ -189,6 +197,13 @@ public final class SdkInternalExecutionAttribute extends SdkExecutionAttribute { public static final ExecutionAttribute RESPONSE_CHECKSUM_VALIDATION = new ExecutionAttribute<>( "ResponseChecksumValidation"); + /** + * The token configured from the environment or system properties, used to determine if the BEARER_SERVICE_ENV_VARS + * business metric should be set. + */ + public static final ExecutionAttribute TOKEN_CONFIGURED_FROM_ENV = new ExecutionAttribute<>( + "TokenConfiguredFromEnv"); + /** * The backing attribute for RESOLVED_CHECKSUM_SPECS. * This holds the real ChecksumSpecs value, and is used to map to the ChecksumAlgorithm signer property diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java index 90d587cd5a36..d1103ea2a2de 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteArrayAsyncResponseTransformer.java @@ -65,6 +65,11 @@ public void exceptionOccurred(Throwable throwable) { cf.completeExceptionally(throwable); } + @Override + public String name() { + return TransformerType.BYTES.getName(); + } + static class BaosSubscriber implements Subscriber { private final CompletableFuture resultFuture; diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java index 87540e5363ba..a4ee21fe0238 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/ByteBuffersAsyncRequestBody.java @@ -118,6 +118,11 @@ public void cancel() { } } + @Override + public String body() { + return BodyType.BYTES.getName(); + } + public static ByteBuffersAsyncRequestBody of(ByteBuffer... buffers) { long length = Arrays.stream(buffers) .mapToLong(ByteBuffer::remaining) diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java index f8bbdd552088..f5dcc164f61c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncRequestBody.java @@ -141,6 +141,11 @@ public void subscribe(Subscriber s) { } } + @Override + public String body() { + return BodyType.FILE.getName(); + } + /** * @return Builder instance to construct a {@link FileAsyncRequestBody}. 
*/ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java index 9d0bdf560af2..4348355fa5d8 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/FileAsyncResponseTransformer.java @@ -169,6 +169,11 @@ public void exceptionOccurred(Throwable throwable) { } } + @Override + public String name() { + return TransformerType.FILE.getName(); + } + /** * {@link Subscriber} implementation that writes chunks to a file. */ diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java index 434894a44c8c..72ece7a26575 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamResponseTransformer.java @@ -59,4 +59,9 @@ public void onStream(SdkPublisher publisher) { public void exceptionOccurred(Throwable error) { future.completeExceptionally(error); } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java index fc742536ec88..7ee81817ed11 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/InputStreamWithExecutorAsyncRequestBody.java @@ 
-86,6 +86,11 @@ public void subscribe(Subscriber s) { } } + @Override + public String body() { + return BodyType.STREAM.getName(); + } + private void tryReset(InputStream inputStream) { try { inputStream.reset(); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java index d5448a5addcd..3f0f9fa19fce 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/async/PublisherAsyncResponseTransformer.java @@ -57,4 +57,9 @@ public void onStream(SdkPublisher publisher) { public void exceptionOccurred(Throwable error) { future.completeExceptionally(error); } + + @Override + public String name() { + return TransformerType.PUBLISHER.getName(); + } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java index 1f6e2b9949d3..ef1e3fb2cc9d 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStage.java @@ -41,6 +41,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.http.pipeline.MutableRequestToRequestPipeline; import software.amazon.awssdk.core.internal.useragent.IdentityProviderNameMapping; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.useragent.BusinessMetricCollection; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.identity.spi.Identity; @@ -110,6 
+111,13 @@ private String finalizeUserAgent(RequestExecutionContext context) { javaUserAgent.append(clientUserAgent); + //add useragent metadata from execution context + List userAgentMetadata = + context.executionAttributes().getAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA); + if (userAgentMetadata != null) { + userAgentMetadata.forEach(s -> javaUserAgent.append(SPACE).append(s)); + } + //add remaining SDK user agent properties identityProviderName(context.executionAttributes()).ifPresent( authSource -> appendSpaceAndField(javaUserAgent, CONFIG_METADATA, uaPair(AUTH_SOURCE, authSource))); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java index 09016026be1c..1d7d040971cf 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncApiCallMetricCollectionStage.java @@ -57,6 +57,9 @@ public CompletableFuture execute(SdkHttpFullRequest input, RequestExecu } else { future.complete(r); } + }).exceptionally(t -> { + future.completeExceptionally(t); + return null; }); return CompletableFutureUtils.forwardExceptionTo(future, executeFuture); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage.java index d92c264a1b39..3e3d79a68524 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/http/pipeline/stages/AsyncRetryableStage.java @@ -75,7 +75,11 @@ private 
RetryingExecutor(SdkHttpFullRequest request, RequestExecutionContext con public CompletableFuture> execute() { CompletableFuture> future = new CompletableFuture<>(); - attemptFirstExecute(future); + try { + attemptFirstExecute(future); + } catch (Throwable t) { + future.completeExceptionally(t); + } return future; } @@ -149,7 +153,11 @@ public void maybeAttemptExecute(CompletableFuture> future) { private void maybeRetryExecute(CompletableFuture> future, Exception exception) { retryableStageHelper.setLastException(exception); - maybeAttemptExecute(future); + try { + maybeAttemptExecute(future); + } catch (Throwable t) { + future.completeExceptionally(t); + } } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java index 856f528eadb5..1072ce98caa9 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/BufferingContentStreamProvider.java @@ -61,6 +61,11 @@ public InputStream newStream() { return bufferedStream; } + @Override + public String name() { + return ProviderType.STREAM.getName(); + } + class ByteArrayStream extends ByteArrayInputStream { ByteArrayStream(byte[] buf, int offset, int length) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java index 69d277960d4c..fb6262b28c26 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/sync/FileContentStreamProvider.java @@ -42,6 +42,11 @@ public InputStream newStream() { return currentStream; } + @Override + public 
String name() { + return ProviderType.FILE.getName(); + } + private void closeCurrentStream() { if (currentStream != null) { invokeSafely(currentStream::close); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilder.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilder.java index ecfa5a862a47..f85fd14a692b 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilder.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilder.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.core.internal.useragent; +import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.API_METADATA; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.APP_ID; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.ENV_METADATA; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.HTTP; @@ -65,6 +66,7 @@ public static String buildClientUserAgentString(SystemUserAgent systemValues, } appendNonEmptyField(uaString, UA_METADATA, UA_VERSION); + appendNonEmptyField(uaString, API_METADATA, userAgentProperties.getProperty(API_METADATA)); appendNonEmptyField(uaString, OS_METADATA, systemValues.osMetadata()); appendNonEmptyField(uaString, LANG_METADATA, systemValues.langMetadata()); appendAdditionalJvmMetadata(uaString, systemValues); diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java index ed2c85f86943..d30e66692368 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/internal/util/MetricUtils.java @@ -38,6 +38,7 @@ import software.amazon.awssdk.metrics.NoOpMetricCollector; 
import software.amazon.awssdk.metrics.SdkMetric; import software.amazon.awssdk.utils.Pair; +import software.amazon.awssdk.utils.uri.SdkUri; /** * Utility methods for working with metrics. @@ -112,7 +113,8 @@ public static void collectServiceEndpointMetrics(MetricCollector metricCollector // Only interested in the service endpoint so don't include any path, query, or fragment component URI requestUri = httpRequest.getUri(); try { - URI serviceEndpoint = new URI(requestUri.getScheme(), requestUri.getAuthority(), null, null, null); + URI serviceEndpoint = SdkUri.getInstance().newUri( + requestUri.getScheme(), requestUri.getAuthority(), null, null, null); metricCollector.reportMetric(CoreMetric.SERVICE_ENDPOINT, serviceEndpoint); } catch (URISyntaxException e) { // This should not happen since getUri() should return a valid URI diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java index e751a8e0d631..ae30c95e615c 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/RequestBody.java @@ -20,7 +20,6 @@ import static software.amazon.awssdk.utils.Validate.paramNotNull; import static software.amazon.awssdk.utils.Validate.validState; -import java.io.ByteArrayInputStream; import java.io.File; import java.io.InputStream; import java.nio.ByteBuffer; @@ -138,12 +137,21 @@ public static RequestBody fromFile(File file) { public static RequestBody fromInputStream(InputStream inputStream, long contentLength) { IoUtils.markStreamWithMaxReadLimit(inputStream); InputStream nonCloseable = nonCloseableInputStream(inputStream); - return fromContentProvider(() -> { - if (nonCloseable.markSupported()) { - invokeSafely(nonCloseable::reset); + ContentStreamProvider provider = new ContentStreamProvider() { + @Override + public InputStream newStream() { + if 
(nonCloseable.markSupported()) { + invokeSafely(nonCloseable::reset); + } + return nonCloseable; } - return nonCloseable; - }, contentLength, Mimetype.MIMETYPE_OCTET_STREAM); + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } + }; + return fromContentProvider(provider, contentLength, Mimetype.MIMETYPE_OCTET_STREAM); } /** @@ -268,7 +276,7 @@ private static RequestBody fromBytesDirect(byte[] bytes) { * Creates a {@link RequestBody} using the specified bytes (without copying). */ private static RequestBody fromBytesDirect(byte[] bytes, String mimetype) { - return new RequestBody(() -> new ByteArrayInputStream(bytes), (long) bytes.length, mimetype); + return new RequestBody(ContentStreamProvider.fromByteArrayUnsafe(bytes), (long) bytes.length, mimetype); } private static InputStream nonCloseableInputStream(InputStream inputStream) { diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java index 3aa8ecac2698..f883b591829e 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/sync/ResponseTransformer.java @@ -26,6 +26,8 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.util.Map; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.core.ResponseBytes; import software.amazon.awssdk.core.ResponseInputStream; @@ -37,6 +39,7 @@ import software.amazon.awssdk.http.AbortableInputStream; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Interface for processing a streaming response from a service in a synchronous fashion. 
This interfaces gives @@ -93,6 +96,16 @@ default boolean needsConnectionLeftOpen() { return false; } + /** + * Each ResponseTransformer should return a well-formed name that can be used to identify the implementation. + * The Transformer name should only include alphanumeric characters. + * + * @return String containing the identifying name of this RequestTransformer. + */ + default String name() { + return TransformerType.UNKNOWN.name(); + } + /** * Creates a response transformer that writes all response content to the specified file. If the file already exists * then a {@link java.nio.file.FileAlreadyExistsException} will be thrown. @@ -102,34 +115,42 @@ default boolean needsConnectionLeftOpen() { * @return ResponseTransformer instance. */ static ResponseTransformer toFile(Path path) { - return (resp, in) -> { - try { - InterruptMonitor.checkInterrupted(); - Files.copy(in, path); - return resp; - } catch (IOException copyException) { - String copyError = "Failed to read response into file: " + path; + return new ResponseTransformer() { + @Override + public ResponseT transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + try { + InterruptMonitor.checkInterrupted(); + Files.copy(inputStream, path); + return response; + } catch (IOException copyException) { + String copyError = "Failed to read response into file: " + path; - if (shouldThrowIOException(copyException)) { - throw new IOException(copyError, copyException); - } + if (shouldThrowIOException(copyException)) { + throw new IOException(copyError, copyException); + } - // Try to clean up the file so that we can retry the request. If we can't delete it, don't retry the request. - try { - Files.deleteIfExists(path); - } catch (IOException deletionException) { - Logger.loggerFor(ResponseTransformer.class) - .error(() -> "Failed to delete destination file '" + path + - "' after reading the service response " + - "failed.", deletionException); - - throw new IOException(copyError + ". 
Additionally, the file could not be cleaned up (" + - deletionException.getMessage() + "), so the request will not be retried.", - copyException); + // Try to clean up the file so that we can retry the request. If we can't delete it, don't retry the request. + try { + Files.deleteIfExists(path); + } catch (IOException deletionException) { + Logger.loggerFor(ResponseTransformer.class) + .error(() -> "Failed to delete destination file '" + path + + "' after reading the service response " + + "failed.", deletionException); + + throw new IOException(copyError + ". Additionally, the file could not be cleaned up (" + + deletionException.getMessage() + "), so the request will not be retried.", + copyException); + } + + // Retry the request + throw RetryableException.builder().message(copyError).cause(copyException).build(); } + } - // Retry the request - throw RetryableException.builder().message(copyError).cause(copyException).build(); + @Override + public String name() { + return TransformerType.FILE.getName(); } }; } @@ -166,10 +187,18 @@ static ResponseTransformer toFile(File file) { * @return ResponseTransformer instance. */ static ResponseTransformer toOutputStream(OutputStream outputStream) { - return (resp, in) -> { - InterruptMonitor.checkInterrupted(); - IoUtils.copy(in, outputStream); - return resp; + return new ResponseTransformer() { + @Override + public ResponseT transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + InterruptMonitor.checkInterrupted(); + IoUtils.copy(inputStream, outputStream); + return response; + } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } }; } @@ -181,12 +210,20 @@ static ResponseTransformer toOutputStream(Outp * @return The streaming response transformer that can be used on the client streaming method. 
*/ static ResponseTransformer> toBytes() { - return (response, inputStream) -> { - try { - InterruptMonitor.checkInterrupted(); - return ResponseBytes.fromByteArrayUnsafe(response, IoUtils.toByteArray(inputStream)); - } catch (IOException e) { - throw RetryableException.builder().message("Failed to read response.").cause(e).build(); + return new ResponseTransformer>() { + @Override + public ResponseBytes transform(ResponseT response, AbortableInputStream inputStream) throws Exception { + try { + InterruptMonitor.checkInterrupted(); + return ResponseBytes.fromByteArrayUnsafe(response, IoUtils.toByteArray(inputStream)); + } catch (IOException e) { + throw RetryableException.builder().message("Failed to read response.").cause(e).build(); + } + } + + @Override + public String name() { + return TransformerType.BYTES.getName(); } }; } @@ -194,7 +231,7 @@ static ResponseTransformer> toBy /** * Creates a response transformer that returns an unmanaged input stream with the response content. This input stream must * be explicitly closed to release the connection. The unmarshalled response object can be obtained via the {@link - * ResponseInputStream#response} method. + * ResponseInputStream#response()} method. *

    * Note that the returned stream is not subject to the retry policy or timeout settings (except for socket timeout) * of the client. No retries will be performed in the event of a socket read failure or connection reset. @@ -203,7 +240,17 @@ static ResponseTransformer> toBy * @return ResponseTransformer instance. */ static ResponseTransformer> toInputStream() { - return unmanaged(ResponseInputStream::new); + return unmanaged(new ResponseTransformer>() { + @Override + public ResponseInputStream transform(ResponseT response, AbortableInputStream inputStream) { + return new ResponseInputStream<>(response, inputStream); + } + + @Override + public String name() { + return TransformerType.STREAM.getName(); + } + }); } /** @@ -228,7 +275,43 @@ public ReturnT transform(ResponseT response, AbortableInputStream inputStream) t public boolean needsConnectionLeftOpen() { return true; } + + @Override + public String name() { + return transformer.name(); + } }; + } + + @SdkProtectedApi + enum TransformerType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STREAM("Stream", "s"), + UNKNOWN("Unknown", "u"); + + private static final Map VALUE_MAP = + EnumUtils.uniqueIndex(TransformerType.class, TransformerType::getName); + + private final String name; + private final String shortValue; + + + TransformerType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } } } diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java new file mode 100644 index 000000000000..dc9eff087bdf --- /dev/null +++ 
b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/AdditionalMetadata.java @@ -0,0 +1,118 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.useragent; + +import static software.amazon.awssdk.utils.Validate.notNull; + +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.core.internal.useragent.UserAgentConstant; + +/** + * Represents UserAgent additional metadata following the format: md/[name]#[value] + */ +@SdkProtectedApi +public final class AdditionalMetadata { + private final String name; + private final String value; + + private AdditionalMetadata(BuilderImpl b) { + this.name = notNull(b.name, "name must not be null"); + this.value = notNull(b.value, "value must not be null"); + } + + public String name() { + return name; + } + + public String value() { + return value; + } + + @Override + public String toString() { + // Format "md/{name}#{value}" + return UserAgentConstant.field( + UserAgentConstant.METADATA, + UserAgentConstant.uaPair(name, value)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AdditionalMetadata that = (AdditionalMetadata) o; + return Objects.equals(name, that.name) && + Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = name != null ? 
name.hashCode() : 0; + result = 31 * result + (value != null ? value.hashCode() : 0); + return result; + } + + public static Builder builder() { + return new BuilderImpl(); + } + + public interface Builder { + /** + * Set the name of the additional metadata. + * + * @param name The name. + * @return This object for method chaining. + */ + Builder name(String name); + + /** + * Set the value of the additional metadata. + * + * @param value The value. + * @return This object for method chaining. + */ + Builder value(String value); + + AdditionalMetadata build(); + } + + private static final class BuilderImpl implements Builder { + private String name; + private String value; + + @Override + public Builder name(String name) { + this.name = name; + return this; + } + + @Override + public Builder value(String value) { + this.value = value; + return this; + } + + @Override + public AdditionalMetadata build() { + return new AdditionalMetadata(this); + } + } +} diff --git a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java index 3779726894da..7f1483d56895 100644 --- a/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java +++ b/core/sdk-core/src/main/java/software/amazon/awssdk/core/useragent/BusinessMetricFeatureId.java @@ -41,6 +41,7 @@ public enum BusinessMetricFeatureId { ACCOUNT_ID_MODE_REQUIRED("R"), RESOLVED_ACCOUNT_ID("T"), DDB_MAPPER("d"), + BEARER_SERVICE_ENV_VARS("3"), UNKNOWN("Unknown"); private static final Map VALUE_MAP = diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientMetricCollectorExceptionTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientMetricCollectorExceptionTest.java new file mode 100644 index 000000000000..52de25b3063f --- /dev/null +++ 
b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientMetricCollectorExceptionTest.java @@ -0,0 +1,129 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.client; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.core.internal.util.AsyncResponseHandlerTestUtils.noOpResponseHandler; +import static utils.HttpTestUtils.testAsyncClientBuilder; + +import java.time.Duration; +import java.util.Collections; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import software.amazon.awssdk.core.Response; +import software.amazon.awssdk.core.SdkResponse; +import software.amazon.awssdk.core.async.EmptyPublisher; +import software.amazon.awssdk.core.http.ExecutionContext; +import software.amazon.awssdk.core.http.NoopTestRequest; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptorChain; +import software.amazon.awssdk.core.interceptor.InterceptorContext; +import 
software.amazon.awssdk.core.internal.http.AmazonAsyncHttpClient; +import software.amazon.awssdk.core.metrics.CoreMetric; +import software.amazon.awssdk.core.protocol.VoidSdkResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpFullResponse; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.http.async.AsyncExecuteRequest; +import software.amazon.awssdk.http.async.SdkAsyncHttpClient; +import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.retries.DefaultRetryStrategy; +import utils.ValidSdkObjects; + +/** + * Tests to verify that exceptions thrown by the MetricCollector are reported through the returned future. + * {@link java.util.concurrent.CompletableFuture}. + * + * @see AsyncClientHandlerExceptionTest + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncClientMetricCollectorExceptionTest { + + public static final String MESSAGE = "test exception"; + + @Mock + private MetricCollector metricCollector; + + @Mock + private SdkAsyncHttpClient asyncHttpClient; + + @Test + public void exceptionInReportMetricReportedInFuture() { + when(metricCollector.createChild(any())).thenReturn(metricCollector); + Exception exception = new RuntimeException(MESSAGE); + doThrow(exception).when(metricCollector).reportMetric(eq(CoreMetric.API_CALL_DURATION), any(Duration.class)); + + CompletableFuture responseFuture = makeRequest(); + + assertThatThrownBy(() -> responseFuture.get(1, TimeUnit.SECONDS)).hasRootCause(exception); + } + + private CompletableFuture makeRequest() { + when(asyncHttpClient.execute(any(AsyncExecuteRequest.class))).thenAnswer((Answer>) invocationOnMock -> { + SdkAsyncHttpResponseHandler handler = invocationOnMock.getArgument(0, AsyncExecuteRequest.class).responseHandler(); + handler.onHeaders(SdkHttpFullResponse.builder() + .statusCode(200) + .build()); + 
handler.onStream(new EmptyPublisher<>()); + return CompletableFuture.completedFuture(null); + }); + + AmazonAsyncHttpClient asyncClient = testAsyncClientBuilder() + .retryStrategy(DefaultRetryStrategy.doNotRetry()) + .asyncHttpClient(asyncHttpClient) + .build(); + + SdkHttpFullRequest httpFullRequest = ValidSdkObjects.sdkHttpFullRequest().build(); + NoopTestRequest sdkRequest = NoopTestRequest.builder().build(); + InterceptorContext interceptorContext = InterceptorContext + .builder() + .request(sdkRequest) + .httpRequest(httpFullRequest) + .build(); + + Response response = + Response.builder() + .isSuccess(true) + .response(VoidSdkResponse.builder().build()) + .httpResponse(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + + return asyncClient + .requestExecutionBuilder() + .originalRequest(sdkRequest) + .request(httpFullRequest) + .executionContext( + ExecutionContext + .builder() + .executionAttributes(new ExecutionAttributes()) + .interceptorContext(interceptorContext) + .metricCollector(metricCollector) + .interceptorChain(new ExecutionInterceptorChain(Collections.emptyList())) + .build() + ) + .execute(noOpResponseHandler(response)); + } +} diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientRetryStrategyExceptionTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientRetryStrategyExceptionTest.java new file mode 100644 index 000000000000..983be14c8451 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/client/AsyncClientRetryStrategyExceptionTest.java @@ -0,0 +1,110 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.core.client; + +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static software.amazon.awssdk.core.internal.util.AsyncResponseHandlerTestUtils.noOpResponseHandler; +import static utils.HttpTestUtils.testAsyncClientBuilder; + +import java.time.Duration; +import java.util.Collections; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnitRunner; +import software.amazon.awssdk.core.SdkResponse; +import software.amazon.awssdk.core.http.ExecutionContext; +import software.amazon.awssdk.core.http.NoopTestRequest; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptorChain; +import software.amazon.awssdk.core.interceptor.InterceptorContext; +import software.amazon.awssdk.core.internal.http.AmazonAsyncHttpClient; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.metrics.MetricCollector; +import software.amazon.awssdk.retries.api.AcquireInitialTokenResponse; +import software.amazon.awssdk.retries.api.RetryStrategy; +import software.amazon.awssdk.retries.api.RetryToken; +import utils.ValidSdkObjects; + +/** + * Tests to verify that exceptions thrown by the RetryStrategy are reported through the returned future. + * {@link java.util.concurrent.CompletableFuture}. 
+ * + * @see AsyncClientHandlerExceptionTest + */ +@RunWith(MockitoJUnitRunner.class) +public class AsyncClientRetryStrategyExceptionTest { + + public static final String MESSAGE = "test exception"; + + @Mock + private RetryStrategy retryStrategy; + + @Test + public void exceptionInInitialTokenReportedInFuture() { + Exception exception = new RuntimeException(MESSAGE); + when(retryStrategy.acquireInitialToken(any())).thenThrow(exception); + + CompletableFuture responseFuture = makeRequest(); + + assertThatThrownBy(() -> responseFuture.get(1, TimeUnit.SECONDS)).hasRootCause(exception); + } + + @Test + public void exceptionInRefreshTokenReportedInFuture() { + when(retryStrategy.acquireInitialToken(any())).thenReturn( + AcquireInitialTokenResponse.create(new RetryToken() { + }, Duration.ZERO) + ); + Exception exception = new RuntimeException(MESSAGE); + when(retryStrategy.refreshRetryToken(any())).thenThrow(exception); + + CompletableFuture responseFuture = makeRequest(); + + assertThatThrownBy(() -> responseFuture.get(1, TimeUnit.SECONDS)).hasRootCause(exception); + } + + private CompletableFuture makeRequest() { + AmazonAsyncHttpClient asyncClient = testAsyncClientBuilder().retryStrategy(retryStrategy).build(); + + SdkHttpFullRequest httpFullRequest = ValidSdkObjects.sdkHttpFullRequest().build(); + NoopTestRequest sdkRequest = NoopTestRequest.builder().build(); + InterceptorContext interceptorContext = InterceptorContext + .builder() + .request(sdkRequest) + .httpRequest(httpFullRequest) + .build(); + + return asyncClient + .requestExecutionBuilder() + .originalRequest(sdkRequest) + .request(httpFullRequest) + .executionContext( + ExecutionContext + .builder() + .executionAttributes(new ExecutionAttributes()) + .interceptorContext(interceptorContext) + .metricCollector(MetricCollector.create("test")) + .interceptorChain(new ExecutionInterceptorChain(Collections.emptyList())) + .build() + ) + .execute(noOpResponseHandler()); + } +} diff --git 
a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java index 68c72afe762b..d02654a78071 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/http/pipeline/stages/ApplyUserAgentStageTest.java @@ -24,6 +24,7 @@ import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.RETRY_MODE; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.SPACE; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; import org.junit.Test; @@ -43,6 +44,7 @@ import software.amazon.awssdk.core.internal.http.RequestExecutionContext; import software.amazon.awssdk.core.internal.useragent.SdkClientUserAgentProperties; import software.amazon.awssdk.core.internal.useragent.SdkUserAgentBuilder; +import software.amazon.awssdk.core.useragent.AdditionalMetadata; import software.amazon.awssdk.core.util.SystemUserAgent; import software.amazon.awssdk.http.SdkHttpFullRequest; import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; @@ -120,6 +122,24 @@ public void when_requestContainsApiName_apiNamesArePresent() throws Exception { assertThat(userAgentHeaders.get(0)).contains("myLib/1.0"); } + @Test + public void when_requestContainsMetadata_metadataIsPresent() throws Exception { + ApplyUserAgentStage stage = new ApplyUserAgentStage(dependencies(clientUserAgent())); + + RequestExecutionContext ctx = requestExecutionContext( + executionAttributes(Arrays.asList( + AdditionalMetadata.builder().name("name1").value("value1").build(), + AdditionalMetadata.builder().name("name2").value("value2").build() + )), + noOpRequest()); + SdkHttpFullRequest.Builder request = 
stage.execute(SdkHttpFullRequest.builder(), ctx); + + List userAgentHeaders = request.headers().get(HEADER_USER_AGENT); + assertThat(userAgentHeaders).isNotNull().hasSize(1); + assertThat(userAgentHeaders.get(0)).contains("md/name1#value1"); + assertThat(userAgentHeaders.get(0)).contains("md/name2#value2"); + } + @Test public void when_identityContainsProvider_authSourceIsPresent() throws Exception { ApplyUserAgentStage stage = new ApplyUserAgentStage(dependencies(clientUserAgent())); @@ -185,6 +205,12 @@ private static ExecutionAttributes executionAttributes(AwsCredentialsIdentity id return executionAttributes; } + private static ExecutionAttributes executionAttributes(List metadata) { + ExecutionAttributes executionAttributes = new ExecutionAttributes(); + executionAttributes.putAttribute(SdkInternalExecutionAttribute.USER_AGENT_METADATA, metadata); + return executionAttributes; + } + private RequestExecutionContext requestExecutionContext(ExecutionAttributes executionAttributes, SdkRequest request) { ExecutionContext executionContext = ExecutionContext.builder() diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilderTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilderTest.java index 62c13f9d9cfc..dbf5dbc95cfe 100644 --- a/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilderTest.java +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/internal/useragent/SdkUserAgentBuilderTest.java @@ -20,6 +20,7 @@ import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.HTTP; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.INTERNAL_METADATA_MARKER; import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.IO; +import static software.amazon.awssdk.core.internal.useragent.UserAgentConstant.API_METADATA; import java.util.Arrays; import 
java.util.Collections; @@ -50,8 +51,9 @@ private static Stream inputValues() { "OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS", "vendor#Amazon.com_Inc.", "en_US", Arrays.asList("Kotlin", "Scala")); - SdkClientUserAgentProperties minimalProperties = sdkProperties(null, null, null, null); - SdkClientUserAgentProperties maximalProperties = sdkProperties( "arbitrary", "async", "Netty", "someAppId"); + SdkClientUserAgentProperties minimalProperties = sdkProperties(null, null, null, null, null); + SdkClientUserAgentProperties maximalProperties = sdkProperties("arbitrary", "async", "Netty", "someAppId", "DynamoDB#2.26.22-SNAPSHOT"); + return Stream.of( Arguments.of("default sysagent, empty requestvalues", @@ -61,33 +63,37 @@ private static Stream inputValues() { Arguments.of("standard sysagent, request values - internalMarker", "aws-sdk-java/2.26.22-SNAPSHOT md/internal ua/2.1 os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. md/en_US md/Kotlin md/Scala exec-env/lambda", - sdkProperties( "arbitrary", null, null, null), + sdkProperties( "arbitrary", null, null, null, null), maximalSysAgent), Arguments.of("standard sysagent, request values - io", "aws-sdk-java/2.26.22-SNAPSHOT md/io#async ua/2.1 os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. md/en_US md/Kotlin md/Scala exec-env/lambda", - sdkProperties( null, "async", null, null), + sdkProperties( null, "async", null, null, null), maximalSysAgent), Arguments.of("standard sysagent, request values - http", "aws-sdk-java/2.26.22-SNAPSHOT md/http#Apache ua/2.1 os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. 
md/en_US md/Kotlin md/Scala exec-env/lambda", - sdkProperties(null, null, "Apache", null), + sdkProperties(null, null, "Apache", null, null), maximalSysAgent), Arguments.of("standard sysagent, request values - authSource", "aws-sdk-java/2.26.22-SNAPSHOT ua/2.1 os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. md/en_US md/Kotlin md/Scala " + "exec-env/lambda", - sdkProperties( null, null, null, null), + sdkProperties( null, null, null, null, null), maximalSysAgent), Arguments.of("standard sysagent, request values - appId", "aws-sdk-java/2.26.22-SNAPSHOT ua/2.1 os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. md/en_US md/Kotlin md/Scala " + "exec-env/lambda app/someAppId", - sdkProperties( null, null, null, "someAppId"), + sdkProperties( null, null, null, "someAppId", null), maximalSysAgent), + Arguments.of("standard sysagent, request values - apiMetadata", + "aws-sdk-java/2.26.22-SNAPSHOT ua/2.1 api/DynamoDB#2.26.22-SNAPSHOT os/Mac_OS_X#14.6.1 lang/java#21.0.2 md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/en_US", + sdkProperties(null, null, null, null, "DynamoDB#2.26.22-SNAPSHOT"), + standardValuesSysAgent), Arguments.of("standard sysagent, request values - maximal", - "aws-sdk-java/2.26.22-SNAPSHOT md/io#async md/http#Netty md/internal ua/2.1 os/Mac_OS_X#14.6.1 " - + "lang/java#21.0.2 " + "aws-sdk-java/2.26.22-SNAPSHOT md/io#async md/http#Netty md/internal ua/2.1 api/DynamoDB#2.26.22-SNAPSHOT" + + " os/Mac_OS_X#14.6.1 lang/java#21.0.2 " + "md/OpenJDK_64-Bit_Server_VM#21.0.2+13-LTS md/vendor#Amazon.com_Inc. 
md/en_US md/Kotlin md/Scala " + "exec-env/lambda app/someAppId", maximalProperties, @@ -95,7 +101,8 @@ private static Stream inputValues() { ); } - private static SdkClientUserAgentProperties sdkProperties(String internalMarker, String io, String http, String appId) { + private static SdkClientUserAgentProperties sdkProperties(String internalMarker, String io, String http, String appId, + String apiMetadata) { SdkClientUserAgentProperties properties = new SdkClientUserAgentProperties(); if (internalMarker != null) { @@ -114,6 +121,10 @@ private static SdkClientUserAgentProperties sdkProperties(String internalMarker, properties.putProperty(APP_ID, appId); } + if (apiMetadata != null) { + properties.putProperty(API_METADATA, apiMetadata); + } + return properties; } diff --git a/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java b/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java new file mode 100644 index 000000000000..fa201ad0d0a1 --- /dev/null +++ b/core/sdk-core/src/test/java/software/amazon/awssdk/core/useragent/AdditionalMetadataTest.java @@ -0,0 +1,41 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.core.useragent; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.Test; + +public class AdditionalMetadataTest { + + @Test + public void toString_formatsCorrectly() { + AdditionalMetadata metadata = AdditionalMetadata.builder() + .name("name") + .value("value") + .build(); + assertEquals("md/name#value", metadata.toString()); + } + @Test + public void equalsHashCode() { + EqualsVerifier.forClass(AdditionalMetadata.class) + .withNonnullFields("name", "value") + .verify(); + } +} + + diff --git a/docs/design/core/presignedURL-Get/DecisionLog.md b/docs/design/core/presignedURL-Get/DecisionLog.md new file mode 100644 index 000000000000..345a3ec9b584 --- /dev/null +++ b/docs/design/core/presignedURL-Get/DecisionLog.md @@ -0,0 +1,32 @@ + +# S3 Pre-signed URL GET - Decision Log + +## Review Meeting: 06/17/2024 +**Attendees**: Alban, John, Zoe, Dongie, Bole, Ran, Saranya + +### Closed Decisions + +1. Create a new PresignedUrlGetObjectResponse specifically for pre-signed URLs, or use the existing GetObjectResponse? Decided to use the existing GetObjectResponse for pre-signed URL operations as the HTTP response from a pre-signed URL GET is same as a standard S3 GetObject response. + +2. Use the existing SDK and S3 exceptions or implement specialized exceptions for validation errors like expired URLs? Decided to utilize existing SDK exceptions rather than creating specialized ones for pre-signed URL operations. + +3. Provide additional client-side validation with server-side validation as fallback or just rely entirely on server-side validation from S3? No additional client-side validation will be implemented for pre-signed URLs. + +### Discussions Addressed + +1. Are there alternative methods to skip signing, such as using NoOpSigner(), instead of setting additional Execution attributes? Added the use of NoOpSigner() in the design doc. + +2. 
Does the S3 response include a checksum? If so, should checksum-related support be implemented in this project, or deferred until after Transfer Manager support is added? S3 Response doesn't include checksum. + +3. What should we name the Helper API? Options include PresignedURLManager or PresignedUrlExtension. Will be addressed in the Surface API Review. + +## Review Meeting: 06/23/2024 +**Attendees**: John, Zoe, Dongie, Bole, Ran, Saranya, Alex, David + +### Decisions Addressed + +1. Should PresignedUrlGetObjectRequest extend S3Request/SdkRequest? Decided to use a standalone request class with minimal parameters (presignedUrl, rangeStart, rangeEnd) to avoid exposing incompatible configurations like credentials and signers. Internally convert to S3Request for ClientHandler compatibility. + +2. Replace IS_DISCOVERED_ENDPOINT execution attribute with a more semantically appropriate solution. Decided to introduce new SKIP_ENDPOINT_RESOLUTION execution attribute specifically for presigned URL scenarios where endpoint resolution should be bypassed, as IS_DISCOVERED_ENDPOINT is tied to deprecated endpoint discovery feature. + +3. Use separate rangeStart/rangeEnd fields vs single range string parameter. Decided to use separate rangeStart and rangeEnd Long fields for better user experience, as start/end is more intuitive than string parsing. diff --git a/docs/design/core/presignedURL-Get/Design.md b/docs/design/core/presignedURL-Get/Design.md new file mode 100644 index 000000000000..a99d0b8bfccb --- /dev/null +++ b/docs/design/core/presignedURL-Get/Design.md @@ -0,0 +1,177 @@ +# Design Document (S3 Pre-signed URL GET) + +## Introduction + +This design introduces S3 object downloads using pre-signed URLs in AWS SDK Java v2, providing feature parity with [v1](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/transfer/PresignedUrlDownload.html). 
Some customers have described a need for downloading S3 objects through pre-signed URLs while maintaining Client side SDK benefits like automatic retries, metrics collection, and typed response objects. + +This document proposes how this functionality should be implemented in the Java SDK v2, addressing customer-requested features ([GitHub Issue #2731](https://github.com/aws/aws-sdk-java-v2/issues/2731), [GitHub Issue #181](https://github.com/aws/aws-sdk-java-v2/issues/181)) by reducing complexity and improving usability for temporary access scenarios. + +## Design Review + +Look at decision log here: [Decision Log Section](DecisionLog.md) + +The Java SDK team has decided to implement a separate `PresignedUrlManager`. The team chose the helper API pattern over direct `S3Client` integration to maintain clean separation of concerns while preserving SDK functionality. + +## Overview + +The design introduces new helper APIs `AsyncPresignedUrlManager` and `PresignedUrlManager` which can be instantiated via the existing `S3AsyncClient` and `S3Client` respectively. These managers provide a clean abstraction layer that preserves SDK benefits while handling the unique requirements of pre-signed URL requests. + +This design will implement only the GET /download function for presigned URLs. + + + +## Proposed APIs + +The v2 SDK will support a presigned URL manager for both sync and async clients that can leverage pre-signed URL downloads. 
+ +### Instantiation +Instantiating from existing client: + +```java +// Async Presigned URL Manager +S3AsyncClient s3Client = S3AsyncClient.create(); +AsyncPresignedUrlManager presignManager = s3Client.presignedManager(); + +// Sync Presigned URL Manager +S3Client s3Client = S3Client.create(); +PresignedUrlManager presignManager = s3Client.presignedManager(); +``` + +### General Usage Examples + +```java +// Create presigned URL request +PresignedUrlGetObjectRequest request = PresignedUrlGetObjectRequest.builder() + .presignedUrl(presignedUrl) + .rangeStart(0L) + .rangeEnd(1024L) + .build(); + +// Async usage +S3AsyncClient s3Client = S3AsyncClient.create(); +AsyncPresignedUrlManager presignManager = s3Client.presignedManager(); +CompletableFuture response = presignManager.getObject(request); + +// Sync usage +S3Client s3Client = S3Client.create(); +PresignedUrlManager presignManager = s3Client.presignedManager(); +GetObjectResponse response = presignManager.getObject(request); +``` + +### AsyncPresignedUrlManager Interface + +```java +/** + * Interface for presigned URL operations used by Async clients. + */ +@SdkPublicApi +public interface AsyncPresignedUrlManager { + + /** + * Downloads S3 objects using pre-signed URLs with custom response transformation. + * + * @param request the presigned URL request. + * @param responseTransformer custom transformer for processing the response. + * @return a CompletableFuture of the transformed response. + */ + CompletableFuture getObject(PresignedUrlGetObjectRequest request, + AsyncResponseTransformer responseTransformer); + + // Additional getObject() overloads for file downloads, byte arrays, etc. + // Standard Builder interface with client() and overrideConfiguration() methods +} +``` + +### PresignedUrlManager Interface + +```java +/** + * Interface for presigned URL operations used by Sync clients. + */ +@SdkPublicApi +public interface PresignedUrlManager { + + /** + * Downloads S3 objects using pre-signed URLs. 
Bypasses normal authentication + * and endpoint resolution while maintaining SDK benefits like retries and metrics. + * + * @param request the presigned URL request containing URL and optional range parameters. + * @return the GetObjectResponse. + */ + GetObjectResponse getObject(PresignedUrlGetObjectRequest request); + + /** + * Downloads S3 objects using pre-signed URLs with custom response transformation. + * + * @param request the presigned URL request. + * @param responseTransformer custom transformer for processing the response. + * @return the transformed response. + */ + T getObject(PresignedUrlGetObjectRequest request, + ResponseTransformer responseTransformer); + + // Additional getObject() overloads for file downloads, byte arrays, etc. + // Standard Builder interface with client() and overrideConfiguration() methods +} +``` + +### PresignedUrlGetObjectRequest + +```java +/** + * Request object for presigned URL GET operations. + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class PresignedUrlGetObjectRequest + implements ToCopyableBuilder { + + private final String presignedUrl; + private final Long rangeStart; + private final Long rangeEnd; + + // Standard getters: presignedUrl(), rangeStart(), rangeEnd() + // Standard builder methods: builder(), toBuilder() + // Standard Builder class with presignedUrl(), rangeStart(), rangeEnd() setter methods +} +``` + +## FAQ + +### Why don't we implement presigned URL download/GET feature directly on the S3Client? + +Three approaches were considered: + +1. **Dedicated PresignedUrlManager (CHOSEN)**: Separate manager accessed via `s3Client.presignedManager()` + - **Pros**: Clean separation, preserves SDK features, follows v2 patterns + - **Cons**: New API surface for users to learn + +2. 
**Direct S3Client Integration**: Add presigned URL methods directly to S3Client + - **Pros**: Familiar interface, direct migration path from v1 + - **Cons**: Requires core interceptor changes, complex integration, could confuse users by mixing presigned URL APIs with standard service-generated APIs + + 3. **S3Presigner Extension**: Extend existing S3Presigner to execute URLs + - **Pros**: Logical extension of presigner concept + - **Cons**: Breaks current stateless presigner patterns + + **Decision**: Option 1 provides clean separation while preserving SDK benefits and following established v2 utility patterns. + + ### Why doesn't PresignedUrlGetObjectRequest extend S3Request? + + While extending S3Request would provide access to RequestOverrideConfiguration, many of these configurations (like credentials provider, signers) are not supported with presigned URL execution. Instead, we use a standalone request with only essential parameters (presignedUrl, rangeStart, rangeEnd). Internally, this gets wrapped in an encapsulated class that extends S3Request for use with ClientHandler. 
+ + +## References + +**GitHub feature requests:** +- [S3 Presigned URL Support #2731](https://github.com/aws/aws-sdk-java-v2/issues/2731) +- [Presigned URL GET Support #181](https://github.com/aws/aws-sdk-java-v2/issues/181) + +**AWS Documentation:** +- [S3 Pre-signed URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/presigned-urls.html) + +**SDK Documentation:** +- [AWS SDK for Java v1 implementation](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/welcome.html) +- [S3 Client architecture patterns](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html) + diff --git a/http-client-spi/pom.xml b/http-client-spi/pom.xml index 789263ba7f01..80e780e8d3f4 100644 --- a/http-client-spi/pom.xml +++ b/http-client-spi/pom.xml @@ -22,7 +22,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT http-client-spi AWS Java SDK :: HTTP Client Interface diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java index 59493fd33e4d..4ae96838d8b4 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/ContentStreamProvider.java @@ -22,11 +22,14 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.Map; import java.util.function.Supplier; +import software.amazon.awssdk.annotations.SdkProtectedApi; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.StringInputStream; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.internal.EnumUtils; /** * Provides the content stream of a request. 
@@ -45,7 +48,7 @@ public interface ContentStreamProvider { static ContentStreamProvider fromByteArray(byte[] bytes) { Validate.paramNotNull(bytes, "bytes"); byte[] copy = Arrays.copyOf(bytes, bytes.length); - return () -> new ByteArrayInputStream(copy); + return fromByteArrayUnsafe(copy); } /** @@ -58,7 +61,17 @@ static ContentStreamProvider fromByteArray(byte[] bytes) { */ static ContentStreamProvider fromByteArrayUnsafe(byte[] bytes) { Validate.paramNotNull(bytes, "bytes"); - return () -> new ByteArrayInputStream(bytes); + return new ContentStreamProvider() { + @Override + public InputStream newStream() { + return new ByteArrayInputStream(bytes); + } + + @Override + public String name() { + return ProviderType.BYTES.getName(); + } + }; } /** @@ -67,7 +80,17 @@ static ContentStreamProvider fromByteArrayUnsafe(byte[] bytes) { static ContentStreamProvider fromString(String string, Charset charset) { Validate.paramNotNull(string, "string"); Validate.paramNotNull(charset, "charset"); - return () -> new StringInputStream(string, charset); + return new ContentStreamProvider() { + @Override + public InputStream newStream() { + return new StringInputStream(string, charset); + } + + @Override + public String name() { + return ProviderType.STRING.getName(); + } + }; } /** @@ -105,6 +128,11 @@ public InputStream newStream() { throw new IllegalStateException("Content input stream does not support mark/reset, " + "and was already read once."); } + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } }; } @@ -125,6 +153,11 @@ public InputStream newStream() { lastStream = inputStreamSupplier.get(); return lastStream; } + + @Override + public String name() { + return ProviderType.STREAM.getName(); + } }; } @@ -132,4 +165,47 @@ public InputStream newStream() { * @return The content stream. */ InputStream newStream(); + + /** + * Each ContentStreamProvider should return a well-formed name that can be used to identify the implementation. 
+ * The stream name should only include alphanumeric characters. + * + * @return String containing the identifying name of this ContentStreamProvider implementation. + */ + default String name() { + return ProviderType.UNKNOWN.getName(); + } + + @SdkProtectedApi + enum ProviderType { + FILE("File", "f"), + BYTES("Bytes", "b"), + STRING("String", "c"), + STREAM("Stream", "s"), + UNKNOWN("Unknown", "u"); + + private static final Map VALUE_MAP = + EnumUtils.uniqueIndex(ProviderType.class, ProviderType::getName); + + private final String name; + private final String shortValue; + + + ProviderType(String name, String shortValue) { + this.name = name; + this.shortValue = shortValue; + } + + public String getName() { + return name; + } + + public String getShortValue() { + return shortValue; + } + + public static String shortValueFromName(String name) { + return VALUE_MAP.getOrDefault(name, UNKNOWN).getShortValue(); + } + } } diff --git a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java index bb34909b5f36..72bcea0299e6 100644 --- a/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java +++ b/http-client-spi/src/main/java/software/amazon/awssdk/http/SdkHttpRequest.java @@ -29,6 +29,7 @@ import software.amazon.awssdk.utils.builder.CopyableBuilder; import software.amazon.awssdk.utils.builder.ToCopyableBuilder; import software.amazon.awssdk.utils.http.SdkHttpUtils; +import software.amazon.awssdk.utils.uri.SdkUri; /** * An immutable HTTP request without access to the request body. {@link SdkHttpFullRequest} should be used when access to a @@ -154,7 +155,7 @@ default URI getUri() { // Do not include the port in the URI when using the default port for the protocol. String portString = SdkHttpUtils.isUsingStandardPort(protocol(), port()) ? 
"" : ":" + port(); - return URI.create(protocol() + "://" + host() + portString + encodedPath() + encodedQueryString); + return SdkUri.getInstance().create(protocol() + "://" + host() + portString + encodedPath() + encodedQueryString); } /** diff --git a/http-clients/apache-client/pom.xml b/http-clients/apache-client/pom.xml index 5b0be3aed1e7..3cd584cc26ce 100644 --- a/http-clients/apache-client/pom.xml +++ b/http-clients/apache-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apache-client diff --git a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/impl/ApacheHttpRequestFactory.java b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/impl/ApacheHttpRequestFactory.java index cfb22343ba3f..1c77e8738f3e 100644 --- a/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/impl/ApacheHttpRequestFactory.java +++ b/http-clients/apache-client/src/main/java/software/amazon/awssdk/http/apache/internal/impl/ApacheHttpRequestFactory.java @@ -42,6 +42,7 @@ import software.amazon.awssdk.http.apache.internal.utils.ApacheUtils; import software.amazon.awssdk.utils.StringUtils; import software.amazon.awssdk.utils.http.SdkHttpUtils; +import software.amazon.awssdk.utils.uri.SdkUri; /** * Responsible for creating Apache HttpClient 4 request objects. @@ -80,7 +81,7 @@ private URI sanitizeUri(SdkHttpRequest request) { String portString = SdkHttpUtils.isUsingStandardPort(protocol, port) ? 
"" : ":" + port; - return URI.create(protocol + "://" + request.host() + portString + newPath + encodedQueryString); + return SdkUri.getInstance().create(protocol + "://" + request.host() + portString + newPath + encodedQueryString); } return request.getUri(); diff --git a/http-clients/aws-crt-client/pom.xml b/http-clients/aws-crt-client/pom.xml index 494aae94f94d..b38c99b516cf 100644 --- a/http-clients/aws-crt-client/pom.xml +++ b/http-clients/aws-crt-client/pom.xml @@ -21,7 +21,7 @@ http-clients software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientBase.java b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientBase.java index 76af9bc6d8f2..2df865f0fa0b 100644 --- a/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientBase.java +++ b/http-clients/aws-crt-client/src/main/java/software/amazon/awssdk/http/crt/AwsCrtHttpClientBase.java @@ -44,6 +44,7 @@ import software.amazon.awssdk.utils.IoUtils; import software.amazon.awssdk.utils.Logger; import software.amazon.awssdk.utils.SdkAutoCloseable; +import software.amazon.awssdk.utils.uri.SdkUri; /** * Common functionality and configuration for the CRT Http clients. 
@@ -162,8 +163,8 @@ HttpClientConnectionManager getOrCreateConnectionPool(URI uri) { } URI poolKey(SdkHttpRequest sdkRequest) { - return invokeSafely(() -> new URI(sdkRequest.protocol(), null, sdkRequest.host(), - sdkRequest.port(), null, null, null)); + return invokeSafely(() -> SdkUri.getInstance().newUri(sdkRequest.protocol(), null, sdkRequest.host(), + sdkRequest.port(), null, null, null)); } @Override diff --git a/http-clients/netty-nio-client/pom.xml b/http-clients/netty-nio-client/pom.xml index b2b4cd472c97..ce662a18cc7d 100644 --- a/http-clients/netty-nio-client/pom.xml +++ b/http-clients/netty-nio-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java index b28b32da1df7..f8a5b99809ca 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.java @@ -60,6 +60,7 @@ import software.amazon.awssdk.utils.AttributeMap; import software.amazon.awssdk.utils.Either; import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.uri.SdkUri; /** * An implementation of {@link SdkAsyncHttpClient} that uses a Netty non-blocking HTTP client to communicate with the service. 
@@ -169,8 +170,8 @@ private SdkEventLoopGroup eventLoopGroup(DefaultBuilder builder) { } private static URI poolKey(SdkHttpRequest sdkRequest) { - return invokeSafely(() -> new URI(sdkRequest.protocol(), null, sdkRequest.host(), - sdkRequest.port(), null, null, null)); + return invokeSafely(() -> SdkUri.getInstance().newUri(sdkRequest.protocol(), null, sdkRequest.host(), + sdkRequest.port(), null, null, null)); } private SslProvider resolveSslProvider(DefaultBuilder builder) { diff --git a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java index e9769cf40c43..e6f309afbad3 100644 --- a/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java +++ b/http-clients/netty-nio-client/src/main/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandler.java @@ -148,7 +148,7 @@ private void closeAndRelease(ChannelHandlerContext ctx) { private HttpRequest connectRequest() { String uri = getUri(); HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.CONNECT, uri, - Unpooled.EMPTY_BUFFER, false); + Unpooled.EMPTY_BUFFER); request.headers().add(HttpHeaderNames.HOST, uri); if (!StringUtils.isEmpty(this.username) && !StringUtils.isEmpty(this.password)) { diff --git a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java index 14984d458dc3..9836a953bda9 100644 --- a/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java +++ 
b/http-clients/netty-nio-client/src/test/java/software/amazon/awssdk/http/nio/netty/internal/ProxyTunnelInitHandlerTest.java @@ -212,7 +212,7 @@ public void handledAdded_writesRequest_withoutAuth() { String uri = REMOTE_HOST.getHost() + ":" + REMOTE_HOST.getPort(); HttpRequest expectedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.CONNECT, uri, - Unpooled.EMPTY_BUFFER, false); + Unpooled.EMPTY_BUFFER); expectedRequest.headers().add(HttpHeaderNames.HOST, uri); assertThat(requestCaptor.getValue()).isEqualTo(expectedRequest); @@ -229,7 +229,7 @@ public void handledAdded_writesRequest_withAuth() { String uri = REMOTE_HOST.getHost() + ":" + REMOTE_HOST.getPort(); HttpRequest expectedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.CONNECT, uri, - Unpooled.EMPTY_BUFFER, false); + Unpooled.EMPTY_BUFFER); expectedRequest.headers().add(HttpHeaderNames.HOST, uri); String authB64 = Base64.getEncoder().encodeToString(String.format("%s:%s", PROXY_USER, PROXY_PASSWORD).getBytes(CharsetUtil.UTF_8)); diff --git a/http-clients/pom.xml b/http-clients/pom.xml index bde554c59440..9ccf212922f5 100644 --- a/http-clients/pom.xml +++ b/http-clients/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/http-clients/url-connection-client/pom.xml b/http-clients/url-connection-client/pom.xml index d687bbe2cd32..014715153126 100644 --- a/http-clients/url-connection-client/pom.xml +++ b/http-clients/url-connection-client/pom.xml @@ -20,7 +20,7 @@ http-clients software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 @@ -60,18 +60,6 @@ junit-vintage-engine test - - software.amazon.awssdk - s3 - ${awsjavasdk.version} - test - - - service-test-utils - software.amazon.awssdk - ${awsjavasdk.version} - test - org.assertj assertj-core @@ -83,12 +71,6 @@ ${awsjavasdk.version} test - - software.amazon.awssdk - regions - ${awsjavasdk.version} - test - software.amazon.awssdk test-utils 
diff --git a/metric-publishers/cloudwatch-metric-publisher/pom.xml b/metric-publishers/cloudwatch-metric-publisher/pom.xml index 6a1fe7d8d6c5..abe73cfe5c71 100644 --- a/metric-publishers/cloudwatch-metric-publisher/pom.xml +++ b/metric-publishers/cloudwatch-metric-publisher/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk metric-publishers - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudwatch-metric-publisher diff --git a/metric-publishers/emf-metric-logging-publisher/pom.xml b/metric-publishers/emf-metric-logging-publisher/pom.xml index 1c612234a2e5..92fef00900ac 100644 --- a/metric-publishers/emf-metric-logging-publisher/pom.xml +++ b/metric-publishers/emf-metric-logging-publisher/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk metric-publishers - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT emf-metric-logging-publisher diff --git a/metric-publishers/emf-metric-logging-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverter.java b/metric-publishers/emf-metric-logging-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverter.java index 7973fa1fa0de..3ab16d3b0878 100644 --- a/metric-publishers/emf-metric-logging-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverter.java +++ b/metric-publishers/emf-metric-logging-publisher/src/main/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverter.java @@ -179,9 +179,13 @@ private void processAndWriteValue(JsonWriter jsonWriter, MetricRecord mRecord return; } - if (Integer.class.isAssignableFrom(valueClass) || Long.class.isAssignableFrom(valueClass)) { + if (Integer.class.isAssignableFrom(valueClass)) { jsonWriter.writeValue((Integer) value); } + + if (Long.class.isAssignableFrom(valueClass)) { + jsonWriter.writeValue((Long) value); + } } private List createEmfStrings(Map, List>> aggregatedMetrics) { diff --git 
a/metric-publishers/emf-metric-logging-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverterTest.java b/metric-publishers/emf-metric-logging-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverterTest.java index 07473f5afc02..a01af0f96320 100644 --- a/metric-publishers/emf-metric-logging-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverterTest.java +++ b/metric-publishers/emf-metric-logging-publisher/src/test/java/software/amazon/awssdk/metrics/publishers/emf/internal/MetricEmfConverterTest.java @@ -233,4 +233,21 @@ void ConvertMetricCollectionToEMF_shouldConformToSchema() throws Exception { assertThat(errors).isEmpty(); } + + @Test + void ConvertMetricCollectionToEMF_longValueShouldSucceed() { + SdkMetric metric = SdkMetric.create("TestMetric", + Long.class, + MetricLevel.INFO, + MetricCategory.CUSTOM); + + MetricCollector metricCollector = MetricCollector.create("test"); + Long metricValue = 42L; + metricCollector.reportMetric(metric, metricValue); + List emfLogs = metricEmfConverterDefault.convertMetricCollectionToEmf(metricCollector.collect()); + + assertThat(emfLogs).containsOnly("{\"_aws\":{\"Timestamp\":12345678,\"LogGroupName\":\"my_log_group_name\"," + + "\"CloudWatchMetrics\":[{\"Namespace\":\"AwsSdk/JavaSdk2\",\"Dimensions\":[[]]," + + "\"Metrics\":[{\"Name\":\"TestMetric\"}]}]},\"TestMetric\":42}"); + } } \ No newline at end of file diff --git a/metric-publishers/pom.xml b/metric-publishers/pom.xml index d8866f5da1b0..4951059afcf6 100644 --- a/metric-publishers/pom.xml +++ b/metric-publishers/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT metric-publishers diff --git a/pom.xml b/pom.xml index effd92a91228..3a8d0083e9a6 100644 --- a/pom.xml +++ b/pom.xml @@ -20,7 +20,7 @@ 4.0.0 software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pom AWS Java 
SDK :: Parent The Amazon Web Services SDK for Java provides Java APIs @@ -101,8 +101,8 @@ ${project.version} - 2.31.39 - 2.31.38 + 2.31.75 + 2.31.74 2.15.2 2.15.2 2.17.3 @@ -191,6 +191,7 @@ 2.1.0 ${skipTests} + true ${project.basedir}/src/it/java ${session.executionRootDirectory} https://github.com/aws/aws-sdk-java-v2 @@ -827,6 +828,21 @@ + + migration-tests + + false + + + false + true + true + true + true + true + + + integration-tests @@ -877,6 +893,55 @@ + + s3-regression-tests + + + doRelease + + + + + true + true + true + true + true + true + + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${maven-failsafe-plugin.version} + + + integration-test + + integration-test + verify + + + + -Xmx12g -Xms4g + + **/*${regression.test}.java + + false + ${project.build.outputDirectory} + ${project.build.testOutputDirectory} + methods + 8 + false + + + + + + + + endpoint-tests diff --git a/release-scripts/pom.xml b/release-scripts/pom.xml index 8b06fe454538..d548a741a67b 100644 --- a/release-scripts/pom.xml +++ b/release-scripts/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml release-scripts diff --git a/scripts/changelog/__init__.py b/scripts/changelog/__init__.py index 8b137891791f..160cebd9c3a9 100644 --- a/scripts/changelog/__init__.py +++ b/scripts/changelog/__init__.py @@ -1 +1,2 @@ - +#!/usr/bin/env python3 +# Package initialization file diff --git a/scripts/changelog/git.py b/scripts/changelog/git.py index 0a8e5e8646f7..9eaedf69c2eb 100644 --- a/scripts/changelog/git.py +++ b/scripts/changelog/git.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import subprocess def stage_file(filename): return subprocess.call(["git", "add", "-A", filename]) \ No newline at end of file diff --git a/scripts/changelog/model.py b/scripts/changelog/model.py index 0b21fedd415f..12d334d7d02a 100644 --- a/scripts/changelog/model.py +++ b/scripts/changelog/model.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 class 
ReleaseChanges(object): def __init__(self, version, date, entries): self.version = version @@ -32,4 +33,5 @@ def prerelease_version_number(self): preview_prefix_len = len("preview-") prerelease_version = self.prerelease[preview_prefix_len:] if prerelease_version != "": - return int(prerelease_version) \ No newline at end of file + return int(prerelease_version) + return None \ No newline at end of file diff --git a/scripts/changelog/util.py b/scripts/changelog/util.py index acb86d8d8049..1815d8b282ed 100644 --- a/scripts/changelog/util.py +++ b/scripts/changelog/util.py @@ -1,14 +1,23 @@ +#!/usr/bin/env python3 import json import os from datetime import date +import functools from changelog.model import ReleaseChanges, ChangelogEntry, Version -def version_cmp(a,b): +def version_cmp(a, b): aa = [a.major, a.minor, a.patch, a.prerelease_version_number()] bb = [b.major, b.minor, b.patch, b.prerelease_version_number()] - return cmp(bb,aa) + # In Python 3, we need to implement our own comparison function + # since the built-in cmp function was removed + if aa > bb: + return -1 + elif aa < bb: + return 1 + else: + return 0 def load_all_released_changes(d): if not os.path.isdir(d): diff --git a/scripts/changelog/writer.py b/scripts/changelog/writer.py index ad9cad65e502..ef714a81b021 100644 --- a/scripts/changelog/writer.py +++ b/scripts/changelog/writer.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 from changelog.git import stage_file from changelog.util import load_all_released_changes, load_unreleased_changes, version_cmp from functools import cmp_to_key @@ -70,9 +71,6 @@ def group_entries(self): def get_sorted_categories(self): return sorted(list(self.categories)) - def is_service_category(self,s): - return s.lower() not in NON_SERVICE_CATEGORIES - def write_header(self): version_string = self.current_changes.version if version_string is None: diff --git a/scripts/doc_crosslinks/generate_cross_link_data.py b/scripts/doc_crosslinks/generate_cross_link_data.py index 
247cd155a070..699ec7319d90 100644 --- a/scripts/doc_crosslinks/generate_cross_link_data.py +++ b/scripts/doc_crosslinks/generate_cross_link_data.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 import os import argparse import io diff --git a/scripts/finalize-release-changes b/scripts/finalize-release-changes index f22b63872a14..65625c891930 100755 --- a/scripts/finalize-release-changes +++ b/scripts/finalize-release-changes @@ -1,6 +1,4 @@ -#!/usr/bin/env python - -from __future__ import print_function +#!/usr/bin/env python3 import argparse import os diff --git a/scripts/generate-changelog b/scripts/generate-changelog index 882d3657fdd4..722154ba8b45 100755 --- a/scripts/generate-changelog +++ b/scripts/generate-changelog @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 from changelog.writer import write_changelog diff --git a/scripts/new-change b/scripts/new-change index cfe8801319e3..093a9edafaea 100755 --- a/scripts/new-change +++ b/scripts/new-change @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # This file is borrowed from the aws/aws-cli project with the following modifications: # - Add a 'deprecation' category, and validation for the category value # - Modify the 'linkify' method to use Markdown syntax instead of reStructuredText (rst) diff --git a/scripts/run-integ-test b/scripts/run-integ-test index 13f10827b7f0..556d2cc91f58 100755 --- a/scripts/run-integ-test +++ b/scripts/run-integ-test @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Run Integ Tests based on the changed files """ @@ -24,9 +24,9 @@ def check_diffs(): diff, stderr = process.communicate() - if process.returncode !=0: + if process.returncode != 0: raise Exception("Unable to do git diff") - return diff.splitlines(False) + return diff.decode('utf-8').splitlines() def get_modules(file_path): """ @@ -44,7 +44,7 @@ def get_modules(file_path): return core_modules_to_test if top_directory in ["http-clients"]: return http_modules_to_test.get(path[1]) - 
elif top_directory== "services": + elif top_directory == "services": return path[1] def run_tests(modules): diff --git a/scripts/setup-new-module b/scripts/setup-new-module new file mode 100755 index 000000000000..fff112a75ce9 --- /dev/null +++ b/scripts/setup-new-module @@ -0,0 +1,406 @@ +#!/usr/bin/env python + +""" +setup-new-module - Script to set up a new module in the AWS SDK for Java v2 +Usage: ./scripts/setup-new-module -n module-name [-t] [-p parent-dir] +Options: + -n module-name: Name of the new module (required) + -t: Set up as a test module (optional, default: false) + -p parent-dir: Parent directory for the module (optional, default: root project directory for regular modules, test directory for test modules) + -h: Show help +""" + +import os +import sys +import argparse +import re +import shutil +from pathlib import Path + + +def parse_arguments(): + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description='Set up a new module in the AWS SDK for Java v2') + parser.add_argument('-n', '--name', required=True, help='Name of the new module (required)') + parser.add_argument('-t', '--test', action='store_true', help='Set up as a test module (optional, default: false)') + parser.add_argument('-p', '--parent-dir', help='Parent directory for the module (optional)') + return parser.parse_args() + + +def get_sdk_version(root_dir): + """Get the current SDK version from the root pom.xml.""" + pom_path = os.path.join(root_dir, 'pom.xml') + with open(pom_path, 'r') as f: + content = f.read() + + # Find the first version tag + match = re.search(r'([^<]+)', content) + if match: + return match.group(1) + else: + print("Warning: Could not find SDK version in pom.xml") + return "UNKNOWN" + + +def create_module_directory(module_dir): + """Create the module directory structure.""" + print(f"Creating module directory: {module_dir}") + os.makedirs(module_dir, exist_ok=True) + os.makedirs(os.path.join(module_dir, 'src/main/java'), exist_ok=True) + 
os.makedirs(os.path.join(module_dir, 'src/main/resources'), exist_ok=True) + os.makedirs(os.path.join(module_dir, 'src/test/java'), exist_ok=True) + os.makedirs(os.path.join(module_dir, 'src/test/resources'), exist_ok=True) + + +def create_pom_xml(module_dir, module_name, sdk_version, is_test_module): + """Create a basic pom.xml file for the module.""" + pom_path = os.path.join(module_dir, 'pom.xml') + + print(f"Using SDK version: {sdk_version}") + + with open(pom_path, 'w') as f: + f.write(f''' + + 4.0.0 + + software.amazon.awssdk + aws-sdk-java-pom + {sdk_version} + + + {module_name} + AWS Java SDK :: {module_name} + AWS SDK for Java - {module_name} + https://aws.amazon.com/sdkforjava +''') + + # Add Automatic-Module-Name for non-test modules + if not is_test_module: + f.write(''' + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + software.amazon.awssdk.TODO + + + + + + +''') + + # Close pom.xml + f.write(''' + + + + + +''') + + print(f"Created pom.xml at {pom_path}") +def add_dependency_to_pom(pom_file, module_name): + """Add dependency to a pom.xml file.""" + # Check if the file exists + if not os.path.isfile(pom_file): + print(f"Warning: {pom_file} does not exist. Skipping.") + return + + # Read the file content + with open(pom_file, 'r') as f: + content = f.read() + + # Check if dependency already exists + dependency_pattern = f"{module_name}" + if dependency_pattern in content: + print(f"Dependency already exists in {pom_file}. 
Skipping.") + return + + # Find the dependencies section and add the new dependency + dependencies_pattern = r"" + new_dependency = f''' + software.amazon.awssdk + {module_name} + ${{awsjavasdk.version}} + +''' + + # Insert the new dependency after the dependencies tag + modified_content = re.sub( + dependencies_pattern, + f"{dependencies_pattern}\n{new_dependency}", + content + ) + + # Write the modified content back to the file + with open(pom_file, 'w') as f: + f.write(modified_content) + + print(f"Added dependency to {pom_file}") + + +def update_root_pom(root_dir, module_name, is_test_module): + """Update the root pom.xml to include the new module.""" + root_pom = os.path.join(root_dir, 'pom.xml') + + # Check if the file exists + if not os.path.isfile(root_pom): + print(f"Warning: {root_pom} does not exist. Skipping.") + return + + # Determine the module path based on whether it's a test module + module_path = module_name + if is_test_module: + module_path = f"test/{module_name}" + + # Read the file content + with open(root_pom, 'r') as f: + content = f.read() + + # Check if module already exists + module_pattern = f"{module_path}" + if module_pattern in content: + print("Module already exists in root pom.xml. Skipping.") + return + + # Find the modules section and add the new module + modules_pattern = r"" + new_module = f" {module_path}" + + # Insert the new module after the modules tag + modified_content = re.sub( + modules_pattern, + f"{modules_pattern}\n{new_module}", + content + ) + + # Write the modified content back to the file + with open(root_pom, 'w') as f: + f.write(modified_content) + + print("Added module to root pom.xml") +def update_japicmp_config(root_dir, module_name): + """Update japicmp plugin config in root pom.xml.""" + root_pom = os.path.join(root_dir, 'pom.xml') + + # Check if the file exists + if not os.path.isfile(root_pom): + print(f"Warning: {root_pom} does not exist. 
Skipping japicmp update.") + return + + # Read the file content + with open(root_pom, 'r') as f: + content = f.read() + + # Check if module already exists in japicmp config + include_module_pattern = f" {module_name}" + if include_module_pattern in content: + print("Module already exists in japicmp config. Skipping.") + return + + # Find the includeModules section and add the new module + include_modules_end_pattern = r"" + new_include_module = f"{module_name}\n" + + # Insert the new module before the includeModules end tag + modified_content = re.sub( + include_modules_end_pattern, + f"{new_include_module} {include_modules_end_pattern}", + content + ) + + # Write the modified content back to the file + with open(root_pom, 'w') as f: + f.write(modified_content) + + print(f"Added {module_name} to japicmp plugin configuration in {root_pom}") + + +def update_brazil_json(root_dir, module_name, is_test): + """Update .brazil.json file.""" + brazil_json = os.path.join(root_dir, '.brazil.json') + + # Check if the file exists + if not os.path.isfile(brazil_json): + print(f"Warning: {brazil_json} does not exist. Skipping.") + return + + # Read the file content + with open(brazil_json, 'r') as f: + content = f.read() + + # Check if module already exists in .brazil.json + module_pattern = f'"{module_name}":' + if module_pattern in content: + print("Module already exists in .brazil.json. 
Skipping.") + return + + if is_test: + # Find a specific test module entry to anchor our insertion + anchor_pattern = r'"s3-tests": {"skipImport": true}' + new_module_entry = f'"{module_name}": {{ "skipImport": true }},\n' + + # Insert the new module before the anchor + modified_content = re.sub( + anchor_pattern, + f"{new_module_entry} {anchor_pattern}", + content + ) + + print(f"Added {module_name} to .brazil.json with skipImport: true") + else: + # Find a specific non-test module entry to anchor our insertion + anchor_pattern = r'"annotations": { "packageName": ' + new_module_entry = f'"{module_name}": {{ "packageName": "TODO" }},\n' + + # Insert the new module before the anchor + modified_content = re.sub( + anchor_pattern, + f"{new_module_entry} {anchor_pattern}", + content + ) + + print(f"Added {module_name} to .brazil.json with packageName: TODO") + + # Write the modified content back to the file + with open(brazil_json, 'w') as f: + f.write(modified_content) +def update_buildspecs(root_dir, module_name): + """Update buildspec files for test modules.""" + release_maven = os.path.join(root_dir, 'buildspecs/release-to-maven.yml') + release_javadoc = os.path.join(root_dir, 'buildspecs/release-javadoc.yml') + + # Update release-to-maven.yml + if os.path.isfile(release_maven): + with open(release_maven, 'r') as f: + content = f.read() + + # Look for MODULES_TO_SKIP variable + modules_pattern = r'MODULES_TO_SKIP="([^"]*)"' + match = re.search(modules_pattern, content) + + if match: + current_modules = match.group(1) + new_modules = f"{current_modules},{module_name}" if current_modules else module_name + + # Update the file + modified_content = re.sub( + modules_pattern, + f'MODULES_TO_SKIP="{new_modules}"', + content + ) + + with open(release_maven, 'w') as f: + f.write(modified_content) + + print(f"Updated MODULES_TO_SKIP in {release_maven} to include {module_name}") + else: + print(f"MODULES_TO_SKIP variable not found in {release_maven}. 
Please manually update.") + else: + print(f"Warning: {release_maven} does not exist. Skipping.") + + # Update release-javadoc.yml + if os.path.isfile(release_javadoc): + with open(release_javadoc, 'r') as f: + content = f.read() + + # Look for MODULES_TO_SKIP variable + modules_pattern = r'MODULES_TO_SKIP="([^"]*)"' + match = re.search(modules_pattern, content) + + if match: + current_modules = match.group(1) + new_modules = f"{current_modules},{module_name}" if current_modules else module_name + + # Update the file + modified_content = re.sub( + modules_pattern, + f'MODULES_TO_SKIP="{new_modules}"', + content + ) + + with open(release_javadoc, 'w') as f: + f.write(modified_content) + + print(f"Updated MODULES_TO_SKIP in {release_javadoc} to include {module_name}") + else: + print(f"MODULES_TO_SKIP variable not found in {release_javadoc}. Please manually update.") + else: + print(f"Warning: {release_javadoc} does not exist. Skipping.") +def main(): + """Main function to set up a new module.""" + args = parse_arguments() + + # Get the root project directory + script_dir = os.path.dirname(os.path.abspath(__file__)) + root_dir = os.path.dirname(script_dir) + + # Set default parent directory based on module type + parent_dir = args.parent_dir + if not parent_dir: + if args.test: + parent_dir = os.path.join(root_dir, 'test') + print(f"Setting default parent directory for test module to: {parent_dir}") + else: + parent_dir = root_dir + + # Create module directory + module_dir = os.path.join(parent_dir, args.name) + create_module_directory(module_dir) + + # Get SDK version + sdk_version = get_sdk_version(root_dir) + + # Create pom.xml + create_pom_xml(module_dir, args.name, sdk_version, args.test) + + # Perform updates based on module type + if not args.test: + print("Performing non-test module updates...") + + # Add to tests-coverage-reporting pom.xml + add_dependency_to_pom(os.path.join(root_dir, 'test/tests-coverage-reporting/pom.xml'), args.name) + + # Add to 
aws-sdk-java pom.xml + add_dependency_to_pom(os.path.join(root_dir, 'aws-sdk-java/pom.xml'), args.name) + + # Add to architecture-tests pom.xml + add_dependency_to_pom(os.path.join(root_dir, 'test/architecture-tests/pom.xml'), args.name) + + # Add to bom pom.xml + add_dependency_to_pom(os.path.join(root_dir, 'bom/pom.xml'), args.name) + + # Update japicmp plugin config + update_japicmp_config(root_dir, args.name) + + # Update .brazil.json + update_brazil_json(root_dir, args.name, False) + + # Update root pom.xml + update_root_pom(root_dir, args.name, False) + else: + print("Performing test module updates...") + + # Update buildspecs + update_buildspecs(root_dir, args.name) + + # Update .brazil.json + update_brazil_json(root_dir, args.name, True) + + # Update root pom.xml + update_root_pom(root_dir, args.name, True) + + print("") + print("Module setup complete! Please review the changes.") + + +if __name__ == "__main__": + main() diff --git a/scripts/validate-brazil-config b/scripts/validate-brazil-config new file mode 100755 index 000000000000..7b58aacc9cf0 --- /dev/null +++ b/scripts/validate-brazil-config @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +from pathlib import Path +import json +import sys +import re + +# This script validates that: +# - All SDK modules are mapped or skipped for internal import +# - All non-SDK dependencies of imported modules are mapped to internal +# packages +# +# Usage: validate-brazil-config [module-paths-file] [dependencies-file] + +# Generating module-paths-file: +# mvn exec:exec -Dexec.executable=pwd -pl \!:aws-sdk-java-pom,\!:sdk-benchmarks,\!:module-path-tests -q 2>&1 > modules.txt +# +# Generates contents similar to: +# /workspace/aws-sdk-java-v2/build-tools +# /workspace/aws-sdk-java-v2/core +# /workspace/aws-sdk-java-v2/core/annotations +# /workspace/aws-sdk-java-v2/utils +# /workspace/aws-sdk-java-v2/test/test-utils +# /workspace/aws-sdk-java-v2/core/metrics-spi +# /workspace/aws-sdk-java-v2/http-client-spi +# 
/workspace/aws-sdk-java-v2/core/endpoints-spi +# /workspace/aws-sdk-java-v2/core/identity-spi +# /workspace/aws-sdk-java-v2/core/http-auth-spi +# ... + +# Generating dependencies-file: +# mvn dependency:list -DexcludeTransitive=true -DincludeScope=runtime 2>&1 > deps.txt +# +# Generates content similar to: +# +# [INFO] -----------------< software.amazon.awssdk:test-utils >------------------ +# [INFO] Building AWS Java SDK :: Test :: Test Utils 2.31.61-SNAPSHOT [6/493] +# [INFO] from test/test-utils/pom.xml +# [INFO] --------------------------------[ jar ]--------------------------------- +# [INFO] +# [INFO] --- dependency:3.1.1:list (default-cli) @ test-utils --- +# [INFO] +# [INFO] The following files have been resolved: +# [INFO] org.slf4j:slf4j-api:jar:1.7.36:compile -- module org.slf4j [auto] +# [INFO] org.junit.jupiter:junit-jupiter:jar:5.10.0:compile -- module org.junit.jupiter +# [INFO] com.fasterxml.jackson.core:jackson-core:jar:2.15.2:compile -- module com.fasterxml.jackson.core +# ... + +brazil_import_config_path=".brazil.json" + +with open(brazil_import_config_path) as f: + brazil_import_config = json.loads(f.read()) + +modules_path=sys.argv[1] + +core_modules = set() + +with open(modules_path) as f: + for l in f.readlines(): + l = l.strip() + module_path = Path(l) + name = module_path.name + if module_path.parent.name != 'services': + core_modules.add(name) + +# Ensure all 'core' modules are mapped. For the purposes of this validation, we +# don't care if we map to a package name or skip import. +config_modules = brazil_import_config['modules'] +for core_module in core_modules: + if core_module not in config_modules: + raise Exception(f"The module {core_module} is not mapped!") + + +# Ensure all dependencies are mapped. 
+current_module_pattern = re.compile(r"\[INFO\] --- .*:list \(default-cli\) @ (.*) ---") +dependency_pattern = re.compile(r"\[INFO\] ([^: ]+:[^: ]+):jar:[^: ]+:(compile|runtime)") + +deps_path=sys.argv[2] +config_dependencies = brazil_import_config['dependencies'] +with open(deps_path) as f: + for l in f.readlines(): + # Match a line that gives the name of the current module + match = current_module_pattern.match(l) + if match is not None: + # Unless explicitly skipped, all modules are imported + skipping_import = False + current_module = match.group(1) + + if current_module in config_modules: + module_import = config_modules[current_module] + + if 'skipImport' in module_import and module_import['skipImport']: + print(f"Module import skipped for {current_module}") + skipping_import = True + + continue + + # Match a line that gives a dependency of a given module + match = dependency_pattern.match(l) + if match is not None and \ + not skipping_import and \ + not match.group(1).startswith("software.amazon.awssdk:"): + # The current module is being imported, and this dependency is not an SDK + # module. 
Ensure that it's mapped + dependency_name = match.group(1) + if dependency_name not in config_dependencies: + raise Exception(f"The dependency {dependency_name} is not mapped!") \ No newline at end of file diff --git a/services-custom/dynamodb-enhanced/pom.xml b/services-custom/dynamodb-enhanced/pom.xml index 73eec949b8bc..55d7bd8e3f23 100644 --- a/services-custom/dynamodb-enhanced/pom.xml +++ b/services-custom/dynamodb-enhanced/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services-custom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dynamodb-enhanced AWS Java SDK :: DynamoDB :: Enhanced Client diff --git a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java new file mode 100644 index 000000000000..fba5ebf91225 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/TableIndices.java @@ -0,0 +1,65 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.enhanced.dynamodb.internal; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; +import software.amazon.awssdk.services.dynamodb.model.ProjectionType; + +@SdkInternalApi +public class TableIndices { + private final List indices; + + public TableIndices(List indices) { + this.indices = indices; + } + + public List localSecondaryIndices() { + return Collections.unmodifiableList(indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .filter(index -> !index.partitionKey().isPresent()) + .map(TableIndices::mapIndexMetadataToEnhancedLocalSecondaryIndex) + .collect(Collectors.toList())); + } + + public List globalSecondaryIndices() { + return Collections.unmodifiableList(indices.stream() + .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) + .filter(index -> index.partitionKey().isPresent()) + .map(TableIndices::mapIndexMetadataToEnhancedGlobalSecondaryIndex) + .collect(Collectors.toList())); + } + + private static EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedLocalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); + } + + private static EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { + return EnhancedGlobalSecondaryIndex.builder() + .indexName(indexMetadata.name()) + .projection(pb -> pb.projectionType(ProjectionType.ALL)) + .build(); + } +} diff --git 
a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java index 1538e977b4c3..cd281dec3d24 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTable.java @@ -17,6 +17,7 @@ import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; +import java.util.ArrayList; import java.util.concurrent.CompletableFuture; import java.util.function.Consumer; import software.amazon.awssdk.annotations.SdkInternalApi; @@ -25,6 +26,7 @@ import software.amazon.awssdk.enhanced.dynamodb.Key; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteTableOperation; @@ -114,7 +116,12 @@ public CompletableFuture createTable(Consumer createTable() { - return createTable(CreateTableEnhancedRequest.builder().build()); + TableIndices indices = new TableIndices(new ArrayList<>(tableSchema.tableMetadata().indices())); + + return createTable(CreateTableEnhancedRequest.builder() + .localSecondaryIndices(indices.localSecondaryIndices()) + .globalSecondaryIndices(indices.globalSecondaryIndices()) + .build()); } @Override diff --git 
a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java index 1bd2638892bd..31ce811b3483 100644 --- a/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java +++ b/services-custom/dynamodb-enhanced/src/main/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbTable.java @@ -15,22 +15,17 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; -import static java.util.Collections.emptyList; import static software.amazon.awssdk.enhanced.dynamodb.internal.EnhancedClientUtils.createKeyFromItem; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import java.util.ArrayList; import java.util.function.Consumer; -import java.util.stream.Collectors; import software.amazon.awssdk.annotations.SdkInternalApi; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbTable; -import software.amazon.awssdk.enhanced.dynamodb.IndexMetadata; import software.amazon.awssdk.enhanced.dynamodb.Key; -import software.amazon.awssdk.enhanced.dynamodb.KeyAttributeMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableMetadata; import software.amazon.awssdk.enhanced.dynamodb.TableSchema; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.CreateTableOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteItemOperation; import software.amazon.awssdk.enhanced.dynamodb.internal.operations.DeleteTableOperation; @@ -46,8 +41,6 @@ import software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedRequest; import 
software.amazon.awssdk.enhanced.dynamodb.model.DeleteItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.DescribeTableEnhancedResponse; -import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; -import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedRequest; import software.amazon.awssdk.enhanced.dynamodb.model.GetItemEnhancedResponse; import software.amazon.awssdk.enhanced.dynamodb.model.PageIterable; @@ -61,7 +54,6 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbClient; import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; -import software.amazon.awssdk.services.dynamodb.model.ProjectionType; @SdkInternalApi public class DefaultDynamoDbTable implements DynamoDbTable { @@ -126,52 +118,14 @@ public void createTable(Consumer requestCons @Override public void createTable() { - Map> indexGroups = splitSecondaryIndicesToLocalAndGlobalOnes(); + TableIndices indices = new TableIndices(new ArrayList<>(tableSchema.tableMetadata().indices())); + createTable(CreateTableEnhancedRequest.builder() - .localSecondaryIndices(extractLocalSecondaryIndices(indexGroups)) - .globalSecondaryIndices(extractGlobalSecondaryIndices(indexGroups)) + .localSecondaryIndices(indices.localSecondaryIndices()) + .globalSecondaryIndices(indices.globalSecondaryIndices()) .build()); } - private Map> splitSecondaryIndicesToLocalAndGlobalOnes() { - Collection indices = tableSchema.tableMetadata().indices(); - return indices.stream() - .filter(index -> !TableMetadata.primaryIndexName().equals(index.name())) - .collect(Collectors.groupingBy(metadata -> { - String partitionKeyName = metadata.partitionKey().map(KeyAttributeMetadata::name).orElse(null); - if (partitionKeyName == null) { - return IndexType.LSI; - } - return IndexType.GSI; - })); - } - - 
private List extractLocalSecondaryIndices(Map> indicesGroups) { - return indicesGroups.getOrDefault(IndexType.LSI, emptyList()).stream() - .map(this::mapIndexMetadataToEnhancedLocalSecondaryIndex) - .collect(Collectors.toList()); - } - - private EnhancedLocalSecondaryIndex mapIndexMetadataToEnhancedLocalSecondaryIndex(IndexMetadata indexMetadata) { - return EnhancedLocalSecondaryIndex.builder() - .indexName(indexMetadata.name()) - .projection(pb -> pb.projectionType(ProjectionType.ALL)) - .build(); - } - - private List extractGlobalSecondaryIndices(Map> indicesGroups) { - return indicesGroups.getOrDefault(IndexType.GSI, emptyList()).stream() - .map(this::mapIndexMetadataToEnhancedGlobalSecondaryIndex) - .collect(Collectors.toList()); - } - - private EnhancedGlobalSecondaryIndex mapIndexMetadataToEnhancedGlobalSecondaryIndex(IndexMetadata indexMetadata) { - return EnhancedGlobalSecondaryIndex.builder() - .indexName(indexMetadata.name()) - .projection(pb -> pb.projectionType(ProjectionType.ALL)) - .build(); - } - @Override public T deleteItem(DeleteItemEnhancedRequest request) { TableOperation> operation = DeleteItemOperation.create(request); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java new file mode 100644 index 000000000000..fe02468958d1 --- /dev/null +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/TableIndicesTest.java @@ -0,0 +1,105 @@ +package software.amazon.awssdk.enhanced.dynamodb; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.enhanced.dynamodb.internal.TableIndices; +import 
software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticIndexMetadata; +import software.amazon.awssdk.enhanced.dynamodb.internal.mapper.StaticKeyAttributeMetadata; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedGlobalSecondaryIndex; +import software.amazon.awssdk.enhanced.dynamodb.model.EnhancedLocalSecondaryIndex; + +public class TableIndicesTest { + + @Test + public void testLocalSecondaryIndices_onlyIncludesLSIs() { + List indices = Arrays.asList(StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("lsi-2") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List lsiList = tableIndices.localSecondaryIndices(); + + assertEquals(2, lsiList.size()); + assertTrue(lsiList.stream().anyMatch(i -> "lsi-1".equals(i.indexName()))); + assertTrue(lsiList.stream().anyMatch(i -> "lsi-2".equals(i.indexName()))); + } + + @Test + public void testGlobalSecondaryIndices_onlyIncludesGSIs() { + List indices = Arrays.asList(StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey1", + AttributeValueType.N)) + .build(), + StaticIndexMetadata.builder() + .name("gsi-2") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey2", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List gsiList = tableIndices.globalSecondaryIndices(); + + assertEquals(2, gsiList.size()); + assertTrue(gsiList.stream().anyMatch(i -> "gsi-1".equals(i.indexName()))); + assertTrue(gsiList.stream().anyMatch(i -> "gsi-2".equals(i.indexName()))); + } + + @Test + public void testPrimaryIndexIsExcluded() { + List indices = Arrays.asList(StaticIndexMetadata.builder() 
+ .name(TableMetadata.primaryIndexName()) + .partitionKey(StaticKeyAttributeMetadata.create("pk", + AttributeValueType.S)) + .build(), + StaticIndexMetadata.builder() + .name("lsi-1") + .build(), + StaticIndexMetadata.builder() + .name("gsi-1") + .partitionKey(StaticKeyAttributeMetadata.create( + "GlobalIndexPartitionKey", + AttributeValueType.N)) + .build()); + + TableIndices tableIndices = new TableIndices(indices); + + List gsiList = tableIndices.globalSecondaryIndices(); + List lsiList = tableIndices.localSecondaryIndices(); + + assertEquals(1, gsiList.size()); + assertEquals("gsi-1", gsiList.get(0).indexName()); + + assertEquals(1, lsiList.size()); + assertEquals("lsi-1", lsiList.get(0).indexName()); + } + + @Test + public void testEmptyIndexList() { + TableIndices tableIndices = new TableIndices(Collections.emptyList()); + + assertTrue(tableIndices.globalSecondaryIndices().isEmpty()); + assertTrue(tableIndices.localSecondaryIndices().isEmpty()); + } +} diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java index 4f61db7487e9..b7cbb4eb428a 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/extensions/VersionedRecordExtensionTest.java @@ -166,7 +166,7 @@ public void beforeWrite_returnsNoOpModification_ifVersionAttributeNotDefined() { } @Test(expected = IllegalArgumentException.class) - public void beforeWrite_throwsIllegalArgumentException_ifVersionAttributeIsWrongType() { + public void beforeWrite_throwsIllegalArgumentException_ifVersioPnAttributeIsWrongType() { FakeItem fakeItem = createUniqueFakeItem(); Map fakeItemWIthBadVersion = new 
HashMap<>(FakeItem.getTableSchema().itemToMap(fakeItem, true)); diff --git a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java index cbf1b7acba56..dd5745b8c048 100644 --- a/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java +++ b/services-custom/dynamodb-enhanced/src/test/java/software/amazon/awssdk/enhanced/dynamodb/internal/client/DefaultDynamoDbAsyncTableTest.java @@ -16,13 +16,22 @@ package software.amazon.awssdk.enhanced.dynamodb.internal.client; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static software.amazon.awssdk.enhanced.dynamodb.internal.AttributeValues.stringValue; +import java.util.Iterator; +import java.util.List; import java.util.Optional; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.enhanced.dynamodb.DynamoDbEnhancedClientExtension; @@ -31,6 +40,10 @@ import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithIndices; import software.amazon.awssdk.enhanced.dynamodb.functionaltests.models.FakeItemWithSort; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; +import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; +import 
software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; +import software.amazon.awssdk.services.dynamodb.model.GlobalSecondaryIndex; +import software.amazon.awssdk.services.dynamodb.model.LocalSecondaryIndex; @RunWith(MockitoJUnitRunner.class) public class DefaultDynamoDbAsyncTableTest { @@ -113,4 +126,55 @@ public void keyFrom_primaryIndex_partitionAndNullSort() { assertThat(key.partitionKeyValue(), is(stringValue(item.getId()))); assertThat(key.sortKeyValue(), is(Optional.empty())); } + + @Test + public void createTable_doesNotTreatPrimaryIndexAsAnyOfSecondaryIndexes() { + DefaultDynamoDbAsyncTable dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItem.getTableSchema(), + "test_table"); + + when(mockDynamoDbAsyncClient.createTable(any(CreateTableRequest.class))) + .thenReturn(CompletableFuture.completedFuture(CreateTableResponse.builder().build())); + + dynamoDbMappedIndex.createTable().join(); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(CreateTableRequest.class); + verify(mockDynamoDbAsyncClient).createTable(requestCaptor.capture()); + + CreateTableRequest request = requestCaptor.getValue(); + + assertThat(request.localSecondaryIndexes().size(), is(0)); + assertThat(request.globalSecondaryIndexes().size(), is(0)); + } + + @Test + public void createTable_groupsSecondaryIndexesExistingInTableSchema() { + DefaultDynamoDbAsyncTable dynamoDbMappedIndex = + new DefaultDynamoDbAsyncTable<>(mockDynamoDbAsyncClient, + mockDynamoDbEnhancedClientExtension, + FakeItemWithIndices.getTableSchema(), + "test_table"); + + when(mockDynamoDbAsyncClient.createTable(any(CreateTableRequest.class))) + .thenReturn(CompletableFuture.completedFuture(CreateTableResponse.builder().build())); + + dynamoDbMappedIndex.createTable().join(); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(CreateTableRequest.class); + 
verify(mockDynamoDbAsyncClient).createTable(requestCaptor.capture()); + + CreateTableRequest request = requestCaptor.getValue(); + + assertThat(request.localSecondaryIndexes().size(), is(1)); + Iterator lsiIterator = request.localSecondaryIndexes().iterator(); + assertThat(lsiIterator.next().indexName(), is("lsi_1")); + + assertThat(request.globalSecondaryIndexes().size(), is(2)); + List globalIndicesNames = request.globalSecondaryIndexes().stream() + .map(GlobalSecondaryIndex::indexName) + .collect(Collectors.toList()); + assertThat(globalIndicesNames, containsInAnyOrder("gsi_1", "gsi_2")); + } } diff --git a/services-custom/iam-policy-builder/pom.xml b/services-custom/iam-policy-builder/pom.xml index f736d31f2b3e..6025b76ac208 100644 --- a/services-custom/iam-policy-builder/pom.xml +++ b/services-custom/iam-policy-builder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml iam-policy-builder diff --git a/services-custom/pom.xml b/services-custom/pom.xml index 7a4308a62fd6..82eb5703491f 100644 --- a/services-custom/pom.xml +++ b/services-custom/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT services-custom AWS Java SDK :: Custom Services diff --git a/services-custom/s3-event-notifications/pom.xml b/services-custom/s3-event-notifications/pom.xml index 7a885f0606b1..f6cdeb11c80a 100644 --- a/services-custom/s3-event-notifications/pom.xml +++ b/services-custom/s3-event-notifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml s3-event-notifications diff --git a/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java b/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java index 
cdb1f1975e71..47e139869ec7 100644 --- a/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java +++ b/services-custom/s3-event-notifications/src/main/java/software/amazon/awssdk/eventnotifications/s3/internal/DefaultS3EventNotificationReader.java @@ -101,7 +101,7 @@ private S3EventNotificationRecord readEventNotificationRecord(JsonNode jsonNode) eventNotificationRecord.setEventSource(eventSource); String eventTime = expectStringOrNull(recordNode, "eventTime"); - eventNotificationRecord.setEventTime(eventName != null ? Instant.parse(eventTime) : null); + eventNotificationRecord.setEventTime(eventTime != null ? Instant.parse(eventTime) : null); RequestParameters requestParameters = readRequestParameters(recordNode.get("requestParameters")); eventNotificationRecord.setRequestParameters(requestParameters); diff --git a/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java b/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java index ef9d2c89154a..b55a2bb74142 100644 --- a/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java +++ b/services-custom/s3-event-notifications/src/test/java/software/amazon/awssdk/eventnotifications/s3/model/S3EventNotificationReaderTest.java @@ -442,6 +442,53 @@ void missingField_shouldBeNull() { assertThat(rec.getResponseElements()).isNull(); } + @Test + void eventTimeIsNullWhenEventNamePresent_shouldSucceed() { + String json = "{\n" + + " \"Records\" : [ {\n" + + " \"eventVersion\" : \"2.1\",\n" + + " \"eventSource\" : \"aws:s3\",\n" + + " \"awsRegion\" : \"us-west-2\",\n" + // missing eventTime + + " \"eventName\" : \"ObjectCreated:Put\",\n" + + " \"userIdentity\" : {\n" + + " \"principalId\" : 
\"AIDAJDPLRKLG7UEXAMUID\"\n" + + " },\n" + + " \"requestParameters\" : {\n" + + " \"sourceIPAddress\" : \"127.1.2.3\"\n" + + " },\n" + + " \"responseElements\":{\n" + + " \"x-amz-request-id\":\"C3D13FE58DE4C810\",\n" + + " \"x-amz-id-2\":\"FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD\"\n" + + " },\n" + + " \"s3\" : {\n" + + " \"s3SchemaVersion\" : \"1.0\",\n" + + " \"configurationId\" : \"testConfigRule\",\n" + + " \"bucket\" : {\n" + + " \"name\" : \"mybucket-test\",\n" + + " \"ownerIdentity\" : {\n" + + " \"principalId\" : \"A3NL1KOZZKExample\"\n" + + " },\n" + + " \"arn\" : \"arn:aws:s3:::mybucket\"\n" + + " },\n" + + " \"object\" : {\n" + + " \"key\" : \"HappyFace-test.jpg\",\n" + + " \"size\" : 2048,\n" + + " \"eTag\" : \"d41d8cd98f00b204e9800998ecf8etag\",\n" + + " \"versionId\" : \"096fKKXTRTtl3on89fVO.nfljtsv6vid\",\n" + + " \"sequencer\" : \"0055AED6DCD9028SEQ\"\n" + + " }\n" + + " }\n" + + " } ]\n" + + "}"; + + S3EventNotification event = S3EventNotification.fromJson(json); + S3EventNotificationRecord rec = event.getRecords().get(0); + assertThat(rec).isNotNull(); + assertThat(rec.getEventName()).isEqualTo("ObjectCreated:Put"); + assertThat(rec.getEventTime()).isNull(); + } + @Test void extraFields_areIgnored() { String json = "{\"Records\":[], \"toto\":123}"; diff --git a/services-custom/s3-transfer-manager/pom.xml b/services-custom/s3-transfer-manager/pom.xml index 7fcb0ee33553..298f44b9bb0f 100644 --- a/services-custom/s3-transfer-manager/pom.xml +++ b/services-custom/s3-transfer-manager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml s3-transfer-manager diff --git a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java index 1398f7bd6ec4..483703c33022 100644 --- 
a/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java +++ b/services-custom/s3-transfer-manager/src/main/java/software/amazon/awssdk/transfer/s3/config/DownloadFilter.java @@ -15,6 +15,7 @@ package software.amazon.awssdk.transfer.s3.config; +import java.util.Objects; import java.util.function.Predicate; import software.amazon.awssdk.annotations.SdkPublicApi; import software.amazon.awssdk.services.s3.model.S3Object; @@ -39,8 +40,47 @@ public interface DownloadFilter extends Predicate { boolean test(S3Object s3Object); /** - * A {@link DownloadFilter} that downloads all non-folder objects. A folder is a 0-byte object created when a customer - * uses S3 console to create a folder, and it always ends with "/". + * Returns a composed filter that represents the logical AND of this filter and another. + * The composed filter returns true only if both this filter and the other filter return true. + * @param other a predicate that will be logically-ANDed with this + * predicate + * @return a composed filter that represents the logical AND of this filter and the other filter + * @throws NullPointerException if other is null + */ + @Override + default DownloadFilter and(Predicate other) { + Objects.requireNonNull(other, "Other predicate cannot be null"); + return s3Object -> test(s3Object) && other.test(s3Object); + } + + /** + * Returns a composed filter that represents the logical OR of this filter and another. + * The composed filter returns true if either this filter or the other filter returns true. 
+ * @param other a predicate that will be logically-ORed with this + * predicate + * @return a composed filter that represents the logical OR of this filter and the other filter + * @throws NullPointerException if other is null + */ + @Override + default DownloadFilter or(Predicate other) { + Objects.requireNonNull(other, "Other predicate cannot be null"); + return s3Object -> test(s3Object) || other.test(s3Object); + } + + /** + * Returns a filter that represents the logical negation of this predicate. + * The returned filter returns true when this filter returns false, and vice versa. + * @return a filter that represents the logical negation of this filter + * predicate + */ + @Override + default DownloadFilter negate() { + return s3Object -> !test(s3Object); + } + + /** + * A {@link DownloadFilter} that downloads all non-folder objects. A folder is a 0-byte object created when a customer uses S3 + * console to create a folder, and it always ends with "/". * *

    * This is the default behavior if no filter is provided. diff --git a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java index cc6da432435d..b3690244f93b 100644 --- a/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java +++ b/services-custom/s3-transfer-manager/src/test/java/software/amazon/awssdk/transfer/s3/config/DownloadFilterTest.java @@ -16,8 +16,11 @@ package software.amazon.awssdk.transfer.s3.config; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import java.util.function.Function; import java.util.stream.Stream; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -38,4 +41,168 @@ public static Stream s3Objects() { void allObjectsFilter_shouldWork(S3Object s3Object, boolean result) { assertThat(DownloadFilter.allObjects().test(s3Object)).isEqualTo(result); } + + private static Stream filterOperationTestCases() { + Function folder1OrFolder3Filter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + DownloadFilter folder3 = obj -> obj.key().startsWith("folder3"); + return folder1.or(folder3); + }; + + Function txtAndLargeSizeFilter = s3Object -> { + DownloadFilter txtFilter = obj -> obj.key().endsWith(".txt"); + DownloadFilter sizeFilter = obj -> obj.size() > 1000L; + return txtFilter.and(sizeFilter); + }; + + Function notFolder1Filter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + return folder1.negate(); + }; + + Function notLargeSizeFilter = s3Object -> { + DownloadFilter largeSize = obj -> obj.size() > 1000L; + return 
largeSize.negate(); + }; + + Function complexFilter = s3Object -> { + DownloadFilter folder1 = obj -> obj.key().startsWith("folder1"); + DownloadFilter folder3 = obj -> obj.key().startsWith("folder3"); + DownloadFilter sizeFilter = obj -> obj.size() > 1000L; + return folder1.or(folder3).and(sizeFilter); + }; + Function nullParameterFilter = s3Object -> { + DownloadFilter baseFilter = obj -> obj.key().startsWith("folder1"); + return s -> { + assertThrows(NullPointerException.class, + () -> baseFilter.or(null), + "or() should throw NullPointerException when other is null"); + assertThrows(NullPointerException.class, + () -> baseFilter.and(null), + "and() should throw NullPointerException when other is null"); + return true; // Return value doesn't matter as we're testing for exceptions + }; + }; + + + return Stream.of( + // OR operation tests + Arguments.of( + "OR: folder1/test.txt matches (folder1 OR folder3)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + true + ), + Arguments.of( + "OR: folder3/test.txt matches (folder1 OR folder3)", + S3Object.builder().key("folder3/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + true + ), + Arguments.of( + "OR: folder2/test.txt does not match (folder1 OR folder3)", + S3Object.builder().key("folder2/test.txt").size(2000L).build(), + folder1OrFolder3Filter, + false + ), + + // AND operation tests + Arguments.of( + "AND: large .txt file matches (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + txtAndLargeSizeFilter, + true + ), + Arguments.of( + "AND: small .txt file does not match (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(500L).build(), + txtAndLargeSizeFilter, + false + ), + Arguments.of( + "AND: large .pdf file does not match (.txt AND size > 1000)", + S3Object.builder().key("folder1/test.pdf").size(2000L).build(), + txtAndLargeSizeFilter, + false + ), + + // NEGATE operation tests + 
Arguments.of( + "NEGATE: folder1 file does not match NOT(folder1)", + S3Object.builder().key("folder1/test.txt").size(1000L).build(), + notFolder1Filter, + false + ), + Arguments.of( + "NEGATE: folder2 file matches NOT(folder1)", + S3Object.builder().key("folder2/test.txt").size(1000L).build(), + notFolder1Filter, + true + ), + Arguments.of( + "NEGATE: large file does not match NOT(size > 1000)", + S3Object.builder().key("test.txt").size(2000L).build(), + notLargeSizeFilter, + false + ), + Arguments.of( + "NEGATE: small file matches NOT(size > 1000)", + S3Object.builder().key("test.txt").size(500L).build(), + notLargeSizeFilter, + true + ), + + // Complex chained operations + Arguments.of( + "COMPLEX: large file in folder1 matches ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(2000L).build(), + complexFilter, + true + ), + Arguments.of( + "COMPLEX: small file in folder1 does not match ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder1/test.txt").size(500L).build(), + complexFilter, + false + ), + Arguments.of( + "COMPLEX: large file in folder2 does not match ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder2/test.txt").size(2000L).build(), + complexFilter, + false + ), + Arguments.of( + "COMPLEX: large file in folder3 matches ((folder1 OR folder3) AND size > 1000)", + S3Object.builder().key("folder3/test.txt").size(2000L).build(), + complexFilter, + true + ), + // NullPointerException + Arguments.of( + "NULL: or/and with null parameter should throw NullPointerException", + S3Object.builder().key("folder1/test.txt").size(1000L).build(), + nullParameterFilter, + true + ) + + ); + } + + @ParameterizedTest + @MethodSource("filterOperationTestCases") + @DisplayName("Test DownloadFilter operations (AND, OR, NEGATE)") + void testFilterOperations(String scenario, S3Object s3Object, + Function filterFactory, + boolean expectedResult) { + // Given + DownloadFilter filter = 
filterFactory.apply(s3Object); + + // When + boolean actualResult = filter.test(s3Object); + + // Then + assertThat(actualResult) + .as(scenario) + .isEqualTo(expectedResult); + } } diff --git a/services/accessanalyzer/pom.xml b/services/accessanalyzer/pom.xml index 9aa0bdc3e5c8..9ec44c608073 100644 --- a/services/accessanalyzer/pom.xml +++ b/services/accessanalyzer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT accessanalyzer AWS Java SDK :: Services :: AccessAnalyzer diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/customization.config b/services/accessanalyzer/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/accessanalyzer/src/main/resources/codegen-resources/customization.config +++ b/services/accessanalyzer/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json index 76176dbd1f51..c188f8b7c1d7 100644 --- a/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json +++ b/services/accessanalyzer/src/main/resources/codegen-resources/service-2.json @@ -1125,6 +1125,10 @@ "unusedAccess":{ "shape":"UnusedAccessConfiguration", "documentation":"

    Specifies the configuration of an unused access analyzer for an Amazon Web Services organization or account.

    " + }, + "internalAccess":{ + "shape":"InternalAccessConfiguration", + "documentation":"

    Specifies the configuration of an internal access analyzer for an Amazon Web Services organization or account. This configuration determines how the analyzer evaluates access within your Amazon Web Services environment.

    " } }, "documentation":"

    Contains information about the configuration of an analyzer for an Amazon Web Services organization or account.

    ", @@ -1187,7 +1191,7 @@ }, "configuration":{ "shape":"AnalyzerConfiguration", - "documentation":"

    Specifies whether the analyzer is an external access or unused access analyzer.

    " + "documentation":"

    Specifies if the analyzer is an external access, unused access, or internal access analyzer.

    " } }, "documentation":"

    Contains information about the analyzer.

    " @@ -1609,7 +1613,7 @@ }, "type":{ "shape":"Type", - "documentation":"

    The type of analyzer to create. Only ACCOUNT, ORGANIZATION, ACCOUNT_UNUSED_ACCESS, and ORGANIZATION_UNUSED_ACCESS analyzers are supported. You can create only one analyzer per account per Region. You can create up to 5 analyzers per organization per Region.

    " + "documentation":"

    The type of analyzer to create. You can create only one analyzer per account per Region. You can create up to 5 analyzers per organization per Region.

    " }, "archiveRules":{ "shape":"InlineArchiveRulesList", @@ -1626,7 +1630,7 @@ }, "configuration":{ "shape":"AnalyzerConfiguration", - "documentation":"

    Specifies the configuration of the analyzer. If the analyzer is an unused access analyzer, the specified scope of unused access is used for the configuration.

    " + "documentation":"

    Specifies the configuration of the analyzer. If the analyzer is an unused access analyzer, the specified scope of unused access is used for the configuration. If the analyzer is an internal access analyzer, the specified internal access analysis rules are used for the configuration.

    " } }, "documentation":"

    Creates an analyzer.

    " @@ -1841,7 +1845,7 @@ }, "resourceControlPolicyRestriction":{ "shape":"ResourceControlPolicyRestriction", - "documentation":"

    The type of restriction applied to the finding by the resource owner with an Organizations resource control policy (RCP).

    " + "documentation":"

    The type of restriction applied to the finding by the resource owner with an Organizations resource control policy (RCP).

    • APPLICABLE: There is an RCP present in the organization but IAM Access Analyzer does not include it in the evaluation of effective permissions. For example, if s3:DeleteObject is blocked by the RCP and the restriction is APPLICABLE, then s3:DeleteObject would still be included in the list of actions for the finding.

    • FAILED_TO_EVALUATE_RCP: There was an error evaluating the RCP.

    • NOT_APPLICABLE: There was no RCP present in the organization, or there was no RCP applicable to the resource. For example, the resource being analyzed is an Amazon RDS snapshot and there is an RCP in the organization, but the RCP only impacts Amazon S3 buckets.

    • APPLIED: This restriction is not currently available for external access findings.

    " } }, "documentation":"

    Contains information about an external access finding.

    " @@ -1983,6 +1987,10 @@ "FindingDetails":{ "type":"structure", "members":{ + "internalAccessDetails":{ + "shape":"InternalAccessDetails", + "documentation":"

    The details for an internal access analyzer finding. This contains information about access patterns identified within your Amazon Web Services organization or account.

    " + }, "externalAccessDetails":{ "shape":"ExternalAccessDetails", "documentation":"

    The details for an external access analyzer finding.

    " @@ -2199,7 +2207,7 @@ }, "findingType":{ "shape":"FindingType", - "documentation":"

    The type of the external access or unused access finding.

    " + "documentation":"

    The type of the access finding. For external access analyzers, the type is ExternalAccess. For unused access analyzers, the type can be UnusedIAMRole, UnusedIAMUserAccessKey, UnusedIAMUserPassword, or UnusedPermission. For internal access analyzers, the type is InternalAccess.

    " } }, "documentation":"

    Contains information about a finding.

    " @@ -2211,7 +2219,8 @@ "UnusedIAMRole", "UnusedIAMUserAccessKey", "UnusedIAMUserPassword", - "UnusedPermission" + "UnusedPermission", + "InternalAccess" ] }, "FindingsList":{ @@ -2229,6 +2238,10 @@ "shape":"ExternalAccessFindingsStatistics", "documentation":"

    The aggregate statistics for an external access analyzer.

    " }, + "internalAccessFindingsStatistics":{ + "shape":"InternalAccessFindingsStatistics", + "documentation":"

    The aggregate statistics for an internal access analyzer. This includes information about active, archived, and resolved findings related to internal access within your Amazon Web Services organization or account.

    " + }, "unusedAccessFindingsStatistics":{ "shape":"UnusedAccessFindingsStatistics", "documentation":"

    The aggregate statistics for an unused access analyzer.

    " @@ -2644,7 +2657,7 @@ }, "findingType":{ "shape":"FindingType", - "documentation":"

    The type of the finding. For external access analyzers, the type is ExternalAccess. For unused access analyzers, the type can be UnusedIAMRole, UnusedIAMUserAccessKey, UnusedIAMUserPassword, or UnusedPermission.

    " + "documentation":"

    The type of the finding. For external access analyzers, the type is ExternalAccess. For unused access analyzers, the type can be UnusedIAMRole, UnusedIAMUserAccessKey, UnusedIAMUserPassword, or UnusedPermission. For internal access analyzers, the type is InternalAccess.

    " } } }, @@ -2750,6 +2763,142 @@ "type":"integer", "box":true }, + "InternalAccessAnalysisRule":{ + "type":"structure", + "members":{ + "inclusions":{ + "shape":"InternalAccessAnalysisRuleCriteriaList", + "documentation":"

    A list of rules for the internal access analyzer containing criteria to include in analysis. Only resources that meet the rule criteria will generate findings.

    " + } + }, + "documentation":"

    Contains information about analysis rules for the internal access analyzer. Analysis rules determine which entities will generate findings based on the criteria you define when you create the rule.

    " + }, + "InternalAccessAnalysisRuleCriteria":{ + "type":"structure", + "members":{ + "accountIds":{ + "shape":"AccountIdsList", + "documentation":"

    A list of Amazon Web Services account IDs to apply to the internal access analysis rule criteria. Account IDs can only be applied to the analysis rule criteria for organization-level analyzers.

    " + }, + "resourceTypes":{ + "shape":"ResourceTypeList", + "documentation":"

    A list of resource types to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources of these types. These resource types are currently supported for internal access analyzers:

    • AWS::S3::Bucket

    • AWS::RDS::DBSnapshot

    • AWS::RDS::DBClusterSnapshot

    • AWS::S3Express::DirectoryBucket

    • AWS::DynamoDB::Table

    • AWS::DynamoDB::Stream

    " + }, + "resourceArns":{ + "shape":"ResourceArnsList", + "documentation":"

    A list of resource ARNs to apply to the internal access analysis rule criteria. The analyzer will only generate findings for resources that match these ARNs.

    " + } + }, + "documentation":"

    The criteria for an analysis rule for an internal access analyzer.

    " + }, + "InternalAccessAnalysisRuleCriteriaList":{ + "type":"list", + "member":{"shape":"InternalAccessAnalysisRuleCriteria"} + }, + "InternalAccessConfiguration":{ + "type":"structure", + "members":{ + "analysisRule":{ + "shape":"InternalAccessAnalysisRule", + "documentation":"

    Contains information about analysis rules for the internal access analyzer. These rules determine which resources and access patterns will be analyzed.

    " + } + }, + "documentation":"

    Specifies the configuration of an internal access analyzer for an Amazon Web Services organization or account. This configuration determines how the analyzer evaluates internal access within your Amazon Web Services environment.

    " + }, + "InternalAccessDetails":{ + "type":"structure", + "members":{ + "action":{ + "shape":"ActionList", + "documentation":"

    The action in the analyzed policy statement that has internal access permission to use.

    " + }, + "condition":{ + "shape":"ConditionKeyMap", + "documentation":"

    The condition in the analyzed policy statement that resulted in an internal access finding.

    " + }, + "principal":{ + "shape":"PrincipalMap", + "documentation":"

    The principal that has access to a resource within the internal environment.

    " + }, + "principalOwnerAccount":{ + "shape":"String", + "documentation":"

    The Amazon Web Services account ID that owns the principal identified in the internal access finding.

    " + }, + "accessType":{ + "shape":"InternalAccessType", + "documentation":"

    The type of internal access identified in the finding. This indicates how the access is granted within your Amazon Web Services environment.

    " + }, + "principalType":{ + "shape":"PrincipalType", + "documentation":"

    The type of principal identified in the internal access finding, such as IAM role or IAM user.

    " + }, + "sources":{ + "shape":"FindingSourceList", + "documentation":"

    The sources of the internal access finding. This indicates how the access that generated the finding is granted within your Amazon Web Services environment.

    " + }, + "resourceControlPolicyRestriction":{ + "shape":"ResourceControlPolicyRestriction", + "documentation":"

    The type of restriction applied to the finding by the resource owner with an Organizations resource control policy (RCP).

    • APPLICABLE: There is an RCP present in the organization but IAM Access Analyzer does not include it in the evaluation of effective permissions. For example, if s3:DeleteObject is blocked by the RCP and the restriction is APPLICABLE, then s3:DeleteObject would still be included in the list of actions for the finding. Only applicable to internal access findings with the account as the zone of trust.

    • FAILED_TO_EVALUATE_RCP: There was an error evaluating the RCP.

    • NOT_APPLICABLE: There was no RCP present in the organization. For internal access findings with the account as the zone of trust, NOT_APPLICABLE could also indicate that there was no RCP applicable to the resource.

    • APPLIED: An RCP is present in the organization and IAM Access Analyzer included it in the evaluation of effective permissions. For example, if s3:DeleteObject is blocked by the RCP and the restriction is APPLIED, then s3:DeleteObject would not be included in the list of actions for the finding. Only applicable to internal access findings with the organization as the zone of trust.

    " + }, + "serviceControlPolicyRestriction":{ + "shape":"ServiceControlPolicyRestriction", + "documentation":"

    The type of restriction applied to the finding by an Organizations service control policy (SCP).

    • APPLICABLE: There is an SCP present in the organization but IAM Access Analyzer does not include it in the evaluation of effective permissions. Only applicable to internal access findings with the account as the zone of trust.

    • FAILED_TO_EVALUATE_SCP: There was an error evaluating the SCP.

    • NOT_APPLICABLE: There was no SCP present in the organization. For internal access findings with the account as the zone of trust, NOT_APPLICABLE could also indicate that there was no SCP applicable to the principal.

    • APPLIED: An SCP is present in the organization and IAM Access Analyzer included it in the evaluation of effective permissions. Only applicable to internal access findings with the organization as the zone of trust.

    " + } + }, + "documentation":"

    Contains information about an internal access finding. This includes details about the access that was identified within your Amazon Web Services organization or account.

    " + }, + "InternalAccessFindingsStatistics":{ + "type":"structure", + "members":{ + "resourceTypeStatistics":{ + "shape":"InternalAccessResourceTypeStatisticsMap", + "documentation":"

    The total number of active findings for each resource type of the specified internal access analyzer.

    " + }, + "totalActiveFindings":{ + "shape":"Integer", + "documentation":"

    The number of active findings for the specified internal access analyzer.

    " + }, + "totalArchivedFindings":{ + "shape":"Integer", + "documentation":"

    The number of archived findings for the specified internal access analyzer.

    " + }, + "totalResolvedFindings":{ + "shape":"Integer", + "documentation":"

    The number of resolved findings for the specified internal access analyzer.

    " + } + }, + "documentation":"

    Provides aggregate statistics about the findings for the specified internal access analyzer. This includes counts of active, archived, and resolved findings.

    " + }, + "InternalAccessResourceTypeDetails":{ + "type":"structure", + "members":{ + "totalActiveFindings":{ + "shape":"Integer", + "documentation":"

    The total number of active findings for the resource type in the internal access analyzer.

    " + }, + "totalResolvedFindings":{ + "shape":"Integer", + "documentation":"

    The total number of resolved findings for the resource type in the internal access analyzer.

    " + }, + "totalArchivedFindings":{ + "shape":"Integer", + "documentation":"

    The total number of archived findings for the resource type in the internal access analyzer.

    " + } + }, + "documentation":"

    Contains information about the total number of active, archived, and resolved findings for a resource type of an internal access analyzer.

    " + }, + "InternalAccessResourceTypeStatisticsMap":{ + "type":"map", + "key":{"shape":"ResourceType"}, + "value":{"shape":"InternalAccessResourceTypeDetails"} + }, + "InternalAccessType":{ + "type":"string", + "enum":[ + "INTRA_ACCOUNT", + "INTRA_ORG" + ] + }, "InternalServerException":{ "type":"structure", "required":["message"], @@ -3480,6 +3629,13 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "PrincipalType":{ + "type":"string", + "enum":[ + "IAM_ROLE", + "IAM_USER" + ] + }, "RdsDbClusterSnapshotAccountId":{"type":"string"}, "RdsDbClusterSnapshotAccountIdsList":{ "type":"list", @@ -3642,12 +3798,17 @@ "type":"string", "pattern":"arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" }, + "ResourceArnsList":{ + "type":"list", + "member":{"shape":"String"} + }, "ResourceControlPolicyRestriction":{ "type":"string", "enum":[ "APPLICABLE", "FAILED_TO_EVALUATE_RCP", - "NOT_APPLICABLE" + "NOT_APPLICABLE", + "APPLIED" ] }, "ResourceNotFoundException":{ @@ -3711,6 +3872,10 @@ }, "documentation":"

    Contains information about the total number of active cross-account and public findings for a resource type of an external access analyzer.

    " }, + "ResourceTypeList":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, "ResourceTypeStatisticsMap":{ "type":"map", "key":{"shape":"ResourceType"}, @@ -3858,6 +4023,15 @@ }, "SecretsManagerSecretKmsId":{"type":"string"}, "SecretsManagerSecretPolicy":{"type":"string"}, + "ServiceControlPolicyRestriction":{ + "type":"string", + "enum":[ + "APPLICABLE", + "FAILED_TO_EVALUATE_SCP", + "NOT_APPLICABLE", + "APPLIED" + ] + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -4150,7 +4324,9 @@ "ACCOUNT", "ORGANIZATION", "ACCOUNT_UNUSED_ACCESS", - "ORGANIZATION_UNUSED_ACCESS" + "ORGANIZATION_UNUSED_ACCESS", + "ACCOUNT_INTERNAL_ACCESS", + "ORGANIZATION_INTERNAL_ACCESS" ] }, "UnprocessableEntityException":{ diff --git a/services/account/pom.xml b/services/account/pom.xml index 888c03311aa3..08bf7848437c 100644 --- a/services/account/pom.xml +++ b/services/account/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT account AWS Java SDK :: Services :: Account diff --git a/services/account/src/main/resources/codegen-resources/customization.config b/services/account/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/account/src/main/resources/codegen-resources/customization.config +++ b/services/account/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/acm/pom.xml b/services/acm/pom.xml index 2eb9840c3f08..d64bff35e82b 100644 --- a/services/acm/pom.xml +++ b/services/acm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT acm AWS Java SDK :: Services :: AWS Certificate Manager diff --git a/services/acm/src/main/resources/codegen-resources/customization.config 
b/services/acm/src/main/resources/codegen-resources/customization.config index ca4eb50f0286..0d45746d8f8a 100644 --- a/services/acm/src/main/resources/codegen-resources/customization.config +++ b/services/acm/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listCertificates" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/acm/src/main/resources/codegen-resources/service-2.json b/services/acm/src/main/resources/codegen-resources/service-2.json index fdcdf5eb1d58..955882e0f005 100644 --- a/services/acm/src/main/resources/codegen-resources/service-2.json +++ b/services/acm/src/main/resources/codegen-resources/service-2.json @@ -24,11 +24,11 @@ }, "input":{"shape":"AddTagsToCertificateRequest"}, "errors":[ - {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"}, + {"shape":"TagPolicyException"}, {"shape":"TooManyTagsException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidArnException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidTagException"}, {"shape":"ResourceNotFoundException"} ], @@ -43,8 +43,8 @@ "input":{"shape":"DeleteCertificateRequest"}, "errors":[ {"shape":"ResourceInUseException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidArnException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} @@ -78,7 +78,7 @@ {"shape":"RequestInProgressException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Exports a private certificate issued by a private certificate authority (CA) for use anywhere. The exported file contains the certificate, the certificate chain, and the encrypted private 2048-bit RSA key associated with the public key that is embedded in the certificate. For security, you must assign a passphrase for the private key when exporting it.

    For information about exporting and formatting a certificate using the ACM console or CLI, see Export a Private Certificate.

    " + "documentation":"

    Exports a private certificate issued by a private certificate authority (CA) or public certificate for use anywhere. The exported file contains the certificate, the certificate chain, and the encrypted private key associated with the public key that is embedded in the certificate. For security, you must assign a passphrase for the private key when exporting it.

    For information about exporting and formatting a certificate using the ACM console or CLI, see Export a private certificate and Export a public certificate.

    " }, "GetAccountConfiguration":{ "name":"GetAccountConfiguration", @@ -117,8 +117,8 @@ "input":{"shape":"ImportCertificateRequest"}, "output":{"shape":"ImportCertificateResponse"}, "errors":[ - {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"}, + {"shape":"TagPolicyException"}, {"shape":"TooManyTagsException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidTagException"}, @@ -178,10 +178,10 @@ }, "input":{"shape":"RemoveTagsFromCertificateRequest"}, "errors":[ - {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"}, - {"shape":"ThrottlingException"}, + {"shape":"TagPolicyException"}, {"shape":"InvalidArnException"}, + {"shape":"ThrottlingException"}, {"shape":"InvalidTagException"}, {"shape":"ResourceNotFoundException"} ], @@ -199,7 +199,7 @@ {"shape":"RequestInProgressException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your Amazon Web Services Private CA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

    " + "documentation":"

    Renews an eligible ACM certificate. In order to renew your Amazon Web Services Private CA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

    " }, "RequestCertificate":{ "name":"RequestCertificate", @@ -210,15 +210,15 @@ "input":{"shape":"RequestCertificateRequest"}, "output":{"shape":"RequestCertificateResponse"}, "errors":[ - {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"}, + {"shape":"TagPolicyException"}, {"shape":"TooManyTagsException"}, {"shape":"InvalidArnException"}, {"shape":"InvalidTagException"}, {"shape":"InvalidDomainValidationOptionsException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

    If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

    ACM behavior differs from the RFC 6125 specification of the certificate validation process. ACM first checks for a Subject Alternative Name, and, if it finds one, ignores the common name (CN).

    After successful completion of the RequestCertificate action, there is a delay of several seconds before you can retrieve information about the new certificate.

    " + "documentation":"

    Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

    If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation.

    ACM behavior differs from the RFC 6125 specification of the certificate validation process. ACM first checks for a Subject Alternative Name, and, if it finds one, ignores the common name (CN).

    After successful completion of the RequestCertificate action, there is a delay of several seconds before you can retrieve information about the new certificate.

    " }, "ResendValidationEmail":{ "name":"ResendValidationEmail", @@ -235,6 +235,24 @@ ], "documentation":"

    Resends the email that requests domain ownership validation. The domain owner or an authorized representative must approve the ACM certificate before it can be issued. The certificate can be approved by clicking a link in the mail to navigate to the Amazon certificate approval website and then clicking I Approve. However, the validation email can be blocked by spam filters. Therefore, if you do not receive the original mail, you can request that the mail be resent within 72 hours of requesting the ACM certificate. If more than 72 hours have elapsed since your original request or since your last attempt to resend validation mail, you must request a new certificate. For more information about setting up your contact email addresses, see Configure Email for your Domain.

    " }, + "RevokeCertificate":{ + "name":"RevokeCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RevokeCertificateRequest"}, + "output":{"shape":"RevokeCertificateResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"InvalidArnException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Revokes a public ACM certificate. You can only revoke certificates that have been previously exported.

    " + }, "UpdateCertificateOptions":{ "name":"UpdateCertificateOptions", "http":{ @@ -248,7 +266,7 @@ {"shape":"InvalidStateException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

    Updates a certificate. Currently, you can use this function to specify whether to opt in to or out of recording your certificate in a certificate transparency log. For more information, see Opting Out of Certificate Transparency Logging.

    " + "documentation":"

    Updates a certificate. You can use this function to specify whether to opt in to or out of recording your certificate in a certificate transparency log and exporting. For more information, see Opting Out of Certificate Transparency Logging and Certificate Manager Exportable Managed Certificates.

    " } }, "shapes":{ @@ -420,6 +438,13 @@ }, "documentation":"

    Contains metadata about an ACM certificate. This structure is returned in the response to a DescribeCertificate request.

    " }, + "CertificateExport":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "CertificateManagedBy":{ "type":"string", "enum":["CLOUDFRONT"] @@ -430,9 +455,13 @@ "CertificateTransparencyLoggingPreference":{ "shape":"CertificateTransparencyLoggingPreference", "documentation":"

    You can opt out of certificate transparency logging by specifying the DISABLED option. Opt in by specifying ENABLED.

    " + }, + "Export":{ + "shape":"CertificateExport", + "documentation":"

    You can opt in to allow the export of your certificates by specifying ENABLED.

    " } }, - "documentation":"

    Structure that contains options for your certificate. Currently, you can use this only to specify whether to opt in to or out of certificate transparency logging. Some browsers require that public certificates issued for your domain be recorded in a log. Certificates that are not logged typically generate a browser error. Transparency makes it possible for you to detect SSL/TLS certificates that have been mistakenly or maliciously issued for your domain. For general information, see Certificate Transparency Logging.

    " + "documentation":"

    Structure that contains options for your certificate. You can use this structure to specify whether to opt in to or out of certificate transparency logging and export your certificate.

    Some browsers require that public certificates issued for your domain be recorded in a log. Certificates that are not logged typically generate a browser error. Transparency makes it possible for you to detect SSL/TLS certificates that have been mistakenly or maliciously issued for your domain. For general information, see Certificate Transparency Logging.

    You can export public ACM certificates to use with Amazon Web Services services as well as outside Amazon Web Services Cloud. For more information, see Certificate Manager exportable public certificate.

    " }, "CertificateStatus":{ "type":"string", @@ -489,6 +518,10 @@ "shape":"ExtendedKeyUsageNames", "documentation":"

    Contains a list of Extended Key Usage X.509 v3 extension objects. Each object specifies a purpose for which the certificate public key can be used and consists of a name and an object identifier (OID).

    " }, + "ExportOption":{ + "shape":"CertificateExport", + "documentation":"

    Indicates if export is enabled for the certificate.

    " + }, "InUse":{ "shape":"NullableBoolean", "documentation":"

    Indicates whether the certificate is currently in use by any Amazon Web Services resources.

    " @@ -630,11 +663,11 @@ }, "ResourceRecord":{ "shape":"ResourceRecord", - "documentation":"

    Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.

    Note: The CNAME information that you need does not include the name of your domain. If you include your domain name in the DNS database CNAME record, validation fails. For example, if the name is \"_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com\", only \"_a79865eb4cd1a6ab990a45779b4e0b96\" must be used.

    " + "documentation":"

    Contains the CNAME record that you add to your DNS database for domain validation. For more information, see Use DNS to Validate Domain Ownership.

    The CNAME information that you need does not include the name of your domain. If you include your domain name in the DNS database CNAME record, validation fails. For example, if the name is _a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com, only _a79865eb4cd1a6ab990a45779b4e0b96 must be used.

    " }, "HttpRedirect":{ "shape":"HttpRedirect", - "documentation":"

    Contains information for HTTP-based domain validation of certificates requested through CloudFront and issued by ACM. This field exists only when the certificate type is AMAZON_ISSUED and the validation method is HTTP.

    " + "documentation":"

    Contains information for HTTP-based domain validation of certificates requested through Amazon CloudFront and issued by ACM. This field exists only when the certificate type is AMAZON_ISSUED and the validation method is HTTP.

    " }, "ValidationMethod":{ "shape":"ValidationMethod", @@ -797,6 +830,10 @@ "shape":"KeyAlgorithmList", "documentation":"

    Specify one or more algorithms that can be used to generate key pairs.

    Default filtering returns only RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048\",\"RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates.

    " }, + "exportOption":{ + "shape":"CertificateExport", + "documentation":"

    Specify ENABLED or DISABLED to identify certificates that can be exported.

    " + }, "managedBy":{ "shape":"CertificateManagedBy", "documentation":"

    Identifies the Amazon Web Services service that manages the certificate issued by ACM.

    " @@ -848,7 +885,7 @@ "documentation":"

    The URL hosting the validation token. RedirectFrom must return this content or redirect here.

    " } }, - "documentation":"

    Contains information for HTTP-based domain validation of certificates requested through CloudFront and issued by ACM. This field exists only when the certificate type is AMAZON_ISSUED and the validation method is HTTP.

    " + "documentation":"

    Contains information for HTTP-based domain validation of certificates requested through Amazon CloudFront and issued by ACM. This field exists only when the certificate type is AMAZON_ISSUED and the validation method is HTTP.

    " }, "IdempotencyToken":{ "type":"string", @@ -903,7 +940,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

    One or more of of request parameters specified is not valid.

    ", + "documentation":"

    One or more of request parameters specified is not valid.

    ", "exception":true }, "InvalidArnException":{ @@ -1229,7 +1266,7 @@ }, "Options":{ "shape":"CertificateOptions", - "documentation":"

    Currently, you can use this parameter to specify whether to add the certificate to a certificate transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. Certificates that have not been logged typically produce an error message in a browser. For more information, see Opting Out of Certificate Transparency Logging.

    " + "documentation":"

    You can use this parameter to specify whether to add the certificate to a certificate transparency log and export your certificate.

    Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. Certificates that have not been logged typically produce an error message in a browser. For more information, see Opting Out of Certificate Transparency Logging.

    You can export public ACM certificates to use with Amazon Web Services services as well as outside the Amazon Web Services Cloud. For more information, see Certificate Manager exportable public certificate.

    " }, "CertificateAuthorityArn":{ "shape":"PcaArn", @@ -1343,6 +1380,32 @@ "A_A_COMPROMISE" ] }, + "RevokeCertificateRequest":{ + "type":"structure", + "required":[ + "CertificateArn", + "RevocationReason" + ], + "members":{ + "CertificateArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the public or private certificate that will be revoked. The ARN must have the following form:

    arn:aws:acm:region:account:certificate/12345678-1234-1234-1234-123456789012

    " + }, + "RevocationReason":{ + "shape":"RevocationReason", + "documentation":"

    Specifies why you revoked the certificate.

    " + } + } + }, + "RevokeCertificateResponse":{ + "type":"structure", + "members":{ + "CertificateArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the public or private certificate that was revoked.

    " + } + } + }, "ServiceErrorMessage":{"type":"string"}, "SortBy":{ "type":"string", @@ -1427,7 +1490,7 @@ }, "Options":{ "shape":"CertificateOptions", - "documentation":"

    Use to update the options for your certificate. Currently, you can specify whether to add your certificate to a transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. Certificates that have not been logged typically produce an error message in a browser.

    " + "documentation":"

    Use to update the options for your certificate. Currently, you can specify whether to add your certificate to a transparency log or export your certificate. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. Certificates that have not been logged typically produce an error message in a browser.

    " } } }, diff --git a/services/acmpca/pom.xml b/services/acmpca/pom.xml index 3a7bf5b44912..2bdaccbc66e6 100644 --- a/services/acmpca/pom.xml +++ b/services/acmpca/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT acmpca AWS Java SDK :: Services :: ACM PCA diff --git a/services/acmpca/src/main/resources/codegen-resources/customization.config b/services/acmpca/src/main/resources/codegen-resources/customization.config index 739238e9dc0d..866842bc7215 100644 --- a/services/acmpca/src/main/resources/codegen-resources/customization.config +++ b/services/acmpca/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listCertificateAuthorities" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/privatenetworks/pom.xml b/services/aiops/pom.xml similarity index 76% rename from services/privatenetworks/pom.xml rename to services/aiops/pom.xml index 25e28b2836b7..e0200384988c 100644 --- a/services/privatenetworks/pom.xml +++ b/services/aiops/pom.xml @@ -1,4 +1,4 @@ - + - - + --> 4.0.0 software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT - privatenetworks - AWS Java SDK :: Services :: Private Networks - The AWS Java SDK for Private Networks module holds the client classes that are used for - communicating with Private Networks. + aiops + AWS Java SDK :: Services :: AI Ops + The AWS Java SDK for AI Ops module holds the client classes that are used for + communicating with AI Ops. 
https://aws.amazon.com/sdkforjava @@ -37,14 +33,13 @@ - software.amazon.awssdk.services.privatenetworks + software.amazon.awssdk.services.aiops - software.amazon.awssdk diff --git a/services/aiops/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/aiops/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..aabfd5a429fd --- /dev/null +++ b/services/aiops/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://aiops-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aiops-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aiops.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://aiops.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/privatenetworks/src/main/resources/codegen-resources/endpoint-tests.json b/services/aiops/src/main/resources/codegen-resources/endpoint-tests.json similarity index 87% rename from services/privatenetworks/src/main/resources/codegen-resources/endpoint-tests.json rename to services/aiops/src/main/resources/codegen-resources/endpoint-tests.json index 4bce020482b0..de9ed7358e68 100644 --- a/services/privatenetworks/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/aiops/src/main/resources/codegen-resources/endpoint-tests.json @@ -4,7 +4,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-east-1.api.aws" + "url": "https://aiops-fips.us-east-1.api.aws" } }, "params": { @@ -17,7 +17,7 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-east-1.amazonaws.com" + "url": "https://aiops-fips.us-east-1.amazonaws.com" } }, "params": { @@ -30,7 +30,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks.us-east-1.api.aws" + "url": "https://aiops.us-east-1.api.aws" } }, "params": { @@ -43,7 +43,7 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks.us-east-1.amazonaws.com" + "url": "https://aiops.us-east-1.amazonaws.com" } }, "params": { @@ -56,7 +56,7 @@ "documentation": "For region cn-north-1 with FIPS 
enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://aiops-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -69,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.cn-north-1.amazonaws.com.cn" + "url": "https://aiops-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -82,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://aiops.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -95,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks.cn-north-1.amazonaws.com.cn" + "url": "https://aiops.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -108,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-gov-east-1.api.aws" + "url": "https://aiops-fips.us-gov-east-1.api.aws" } }, "params": { @@ -121,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-gov-east-1.amazonaws.com" + "url": "https://aiops-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -134,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://private-networks.us-gov-east-1.api.aws" + "url": "https://aiops.us-gov-east-1.api.aws" } }, "params": { @@ -147,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://private-networks.us-gov-east-1.amazonaws.com" + "url": "https://aiops.us-gov-east-1.amazonaws.com" } }, "params": { @@ -171,7 +171,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://aiops-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -195,7 +195,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks.us-iso-east-1.c2s.ic.gov" + "url": "https://aiops.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -219,7 +219,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://aiops-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -243,7 +243,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://private-networks.us-isob-east-1.sc2s.sgov.gov" + "url": "https://aiops.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff --git a/services/aiops/src/main/resources/codegen-resources/paginators-1.json b/services/aiops/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..7377349ca59a --- /dev/null +++ b/services/aiops/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListInvestigationGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "investigationGroups" + } + } +} diff --git a/services/aiops/src/main/resources/codegen-resources/service-2.json b/services/aiops/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..091c80f2fd82 --- /dev/null +++ 
b/services/aiops/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,871 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"aiops", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS AI Ops", + "serviceId":"AIOps", + "signatureVersion":"v4", + "signingName":"aiops", + "uid":"aiops-2018-05-10" + }, + "operations":{ + "CreateInvestigationGroup":{ + "name":"CreateInvestigationGroup", + "http":{ + "method":"POST", + "requestUri":"/investigationGroups", + "responseCode":201 + }, + "input":{"shape":"CreateInvestigationGroupInput"}, + "output":{"shape":"CreateInvestigationGroupOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates an investigation group in your account. Creating an investigation group is a one-time setup task for each Region in your account. It is a necessary task to be able to perform investigations.

    Settings in the investigation group help you centrally manage the common properties of your investigations, such as the following:

    • Who can access the investigations

    • Whether investigation data is encrypted with a customer managed Key Management Service key.

    • How long investigations and their data are retained by default.

    Currently, you can have one investigation group in each Region in your account. Each investigation in a Region is a part of the investigation group in that Region.

    To create an investigation group and set up CloudWatch investigations, you must be signed in to an IAM principal that has either the AIOpsConsoleAdminPolicy or the AdministratorAccess IAM policy attached, or to an account that has similar permissions.

    You can configure CloudWatch alarms to start investigations and add events to investigations. If you create your investigation group with CreateInvestigationGroup and you want to enable alarms to do this, you must use PutInvestigationGroupPolicy to create a resource policy that grants this permission to CloudWatch alarms.

    For more information about configuring CloudWatch alarms to work with CloudWatch investigations, see

    ", + "idempotent":true + }, + "DeleteInvestigationGroup":{ + "name":"DeleteInvestigationGroup", + "http":{ + "method":"DELETE", + "requestUri":"/investigationGroups/{identifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteInvestigationGroupRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Deletes the specified investigation group from your account. You can currently have one investigation group per Region in your account. After you delete an investigation group, you can later create a new investigation group in the same Region.

    ", + "idempotent":true + }, + "DeleteInvestigationGroupPolicy":{ + "name":"DeleteInvestigationGroupPolicy", + "http":{ + "method":"DELETE", + "requestUri":"/investigationGroups/{identifier}/policy", + "responseCode":200 + }, + "input":{"shape":"DeleteInvestigationGroupPolicyRequest"}, + "output":{"shape":"DeleteInvestigationGroupPolicyOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Removes the IAM resource policy from being associated with the investigation group that you specify.

    ", + "idempotent":true + }, + "GetInvestigationGroup":{ + "name":"GetInvestigationGroup", + "http":{ + "method":"GET", + "requestUri":"/investigationGroups/{identifier}", + "responseCode":200 + }, + "input":{"shape":"GetInvestigationGroupRequest"}, + "output":{"shape":"GetInvestigationGroupResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Returns the configuration information for the specified investigation group.

    " + }, + "GetInvestigationGroupPolicy":{ + "name":"GetInvestigationGroupPolicy", + "http":{ + "method":"GET", + "requestUri":"/investigationGroups/{identifier}/policy", + "responseCode":200 + }, + "input":{"shape":"GetInvestigationGroupPolicyRequest"}, + "output":{"shape":"GetInvestigationGroupPolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Returns the IAM resource policy that is associated with the specified investigation group.

    " + }, + "ListInvestigationGroups":{ + "name":"ListInvestigationGroups", + "http":{ + "method":"GET", + "requestUri":"/investigationGroups", + "responseCode":200 + }, + "input":{"shape":"ListInvestigationGroupsInput"}, + "output":{"shape":"ListInvestigationGroupsOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Returns the ARN and name of each investigation group in the account.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Displays the tags associated with a CloudWatch investigations resource. Currently, investigation groups support tagging.

    " + }, + "PutInvestigationGroupPolicy":{ + "name":"PutInvestigationGroupPolicy", + "http":{ + "method":"POST", + "requestUri":"/investigationGroups/{identifier}/policy", + "responseCode":200 + }, + "input":{"shape":"PutInvestigationGroupPolicyRequest"}, + "output":{"shape":"PutInvestigationGroupPolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Creates an IAM resource policy and assigns it to the specified investigation group.

    If you create your investigation group with CreateInvestigationGroup and you want to enable CloudWatch alarms to create investigations and add events to investigations, you must use this operation to create a policy similar to this example.

    { \"Version\": \"2008-10-17\", \"Statement\": [{ \"Effect\": \"Allow\", \"Principal\": { \"Service\": \"aiops.alarms.cloudwatch.amazonaws.com\" }, \"Action\": [\"aiops:CreateInvestigation\", \"aiops:CreateInvestigationEvent\"], \"Resource\": \"*\", \"Condition\": { \"StringEquals\": { \"aws:SourceAccount\": \"account-id\" }, \"ArnLike\": { \"aws:SourceArn\": \"arn:aws:cloudwatch:region:account-id:alarm:*\" } } }] }

    ", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Assigns one or more tags (key-value pairs) to the specified resource.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

    You can associate as many as 50 tags with a resource.

    ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Removes one or more tags from the specified resource.

    ", + "idempotent":true + }, + "UpdateInvestigationGroup":{ + "name":"UpdateInvestigationGroup", + "http":{ + "method":"PATCH", + "requestUri":"/investigationGroups/{identifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateInvestigationGroupRequest"}, + "output":{"shape":"UpdateInvestigationGroupOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

    Updates the configuration of the specified investigation group.

    ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    You don't have sufficient permissions to perform this action.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ChatConfigurationArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.*" + }, + "ChatConfigurationArns":{ + "type":"list", + "member":{"shape":"ChatConfigurationArn"}, + "max":5, + "min":1 + }, + "ChatbotNotificationChannel":{ + "type":"map", + "key":{"shape":"SNSTopicArn"}, + "value":{"shape":"ChatConfigurationArns"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    This operation couldn't be completed because of a conflict in resource states.

    ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateInvestigationGroupInput":{ + "type":"structure", + "required":[ + "name", + "roleArn" + ], + "members":{ + "name":{ + "shape":"StringWithPatternAndLengthLimits", + "documentation":"

    Provides a name for the investigation group.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    Specify the ARN of the IAM role that CloudWatch investigations will use when it gathers investigation data. The permissions in this role determine which of your resources that CloudWatch investigations will have access to during investigations.

    For more information, see How to control what data Amazon Q has access to during investigations.

    " + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

    Use this structure if you want to use a customer managed KMS key to encrypt your investigation data. If you omit this parameter, CloudWatch investigations will use an Amazon Web Services key to encrypt the data. For more information, see Encryption of investigation data.

    " + }, + "retentionInDays":{ + "shape":"Retention", + "documentation":"

    Specify how long that investigation data is kept. For more information, see Operational investigation data retention.

    If you omit this parameter, the default of 90 days is used.

    " + }, + "tags":{ + "shape":"Tags", + "documentation":"

    A list of key-value pairs to associate with the investigation group. You can associate as many as 50 tags with an investigation group. To be able to associate tags when you create the investigation group, you must have the cloudwatch:TagResource permission.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    " + }, + "tagKeyBoundaries":{ + "shape":"TagKeyBoundaries", + "documentation":"

    Enter the existing custom tag keys for custom applications in your system. Resource tags help Amazon Q narrow the search space when it is unable to discover definite relationships between resources. For example, to discover that an Amazon ECS service depends on an Amazon RDS database, Amazon Q can discover this relationship using data sources such as X-Ray and CloudWatch Application Signals. However, if you haven't deployed these features, Amazon Q will attempt to identify possible relationships. Tag boundaries can be used to narrow the resources that will be discovered by Amazon Q in these cases.

    You don't need to enter tags created by myApplications or CloudFormation, because Amazon Q can automatically detect those tags.

    " + }, + "chatbotNotificationChannel":{ + "shape":"ChatbotNotificationChannel", + "documentation":"

    Use this structure to integrate CloudWatch investigations with Amazon Q in chat applications. This structure is a string array. For the first string, specify the ARN of an Amazon SNS topic. For the array of strings, specify the ARNs of one or more Amazon Q in chat applications configurations that you want to associate with that topic. For more information about these configuration ARNs, see Getting started with Amazon Q in chat applications and Resource type defined by Amazon Web Services Chatbot.

    " + }, + "isCloudTrailEventHistoryEnabled":{ + "shape":"Boolean", + "documentation":"

    Specify true to enable CloudWatch investigations to have access to change events that are recorded by CloudTrail. The default is true.

    " + }, + "crossAccountConfigurations":{ + "shape":"CrossAccountConfigurations", + "documentation":"

    Number of sourceAccountId values that have been configured for cross-account access.

    " + } + } + }, + "CreateInvestigationGroupOutput":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"InvestigationGroupArn", + "documentation":"

    The ARN of the investigation group that you just created.

    " + } + } + }, + "CrossAccountConfiguration":{ + "type":"structure", + "members":{ + "sourceRoleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of an existing role which will be used to do investigations on your behalf.

    " + } + }, + "documentation":"

    This structure contains information about the cross-account configuration in the account.

    " + }, + "CrossAccountConfigurations":{ + "type":"list", + "member":{"shape":"CrossAccountConfiguration"}, + "max":25, + "min":0 + }, + "DeleteInvestigationGroupPolicyOutput":{ + "type":"structure", + "members":{ + } + }, + "DeleteInvestigationGroupPolicyRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to remove the policy from.

    ", + "location":"uri", + "locationName":"identifier" + } + } + }, + "DeleteInvestigationGroupRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to delete.

    ", + "location":"uri", + "locationName":"identifier" + } + } + }, + "EncryptionConfiguration":{ + "type":"structure", + "members":{ + "type":{ + "shape":"EncryptionConfigurationType", + "documentation":"

    Displays whether investigation data is encrypted by a customer managed key or an Amazon Web Services owned key.

    " + }, + "kmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

    If the investigation group uses a customer managed key for encryption, this field displays the ID of that key.

    " + } + }, + "documentation":"

    Use this structure to specify a customer managed KMS key to use to encrypt investigation data.

    " + }, + "EncryptionConfigurationType":{ + "type":"string", + "enum":[ + "AWS_OWNED_KEY", + "CUSTOMER_MANAGED_KMS_KEY" + ] + }, + "ForbiddenException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    Access is denied for this operation, or this operation is not valid for the specified resource.

    ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "GetInvestigationGroupPolicyRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to view the policy of.

    ", + "location":"uri", + "locationName":"identifier" + } + } + }, + "GetInvestigationGroupPolicyResponse":{ + "type":"structure", + "members":{ + "investigationGroupArn":{ + "shape":"InvestigationGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the investigation group that you want to view the policy of.

    " + }, + "policy":{ + "shape":"InvestigationGroupPolicyDocument", + "documentation":"

    The policy, in JSON format.

    " + } + } + }, + "GetInvestigationGroupRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to view.

    ", + "location":"uri", + "locationName":"identifier" + } + } + }, + "GetInvestigationGroupResponse":{ + "type":"structure", + "members":{ + "createdBy":{ + "shape":"IdentifierStringWithPatternAndLengthLimits", + "documentation":"

    The name of the user who created the investigation group.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the investigation group was created.

    " + }, + "lastModifiedBy":{ + "shape":"IdentifierStringWithPatternAndLengthLimits", + "documentation":"

    The name of the user who most recently modified the investigation group.

    " + }, + "lastModifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the investigation group was most recently modified.

    " + }, + "name":{ + "shape":"StringWithPatternAndLengthLimits", + "documentation":"

    The name of the investigation group.

    " + }, + "arn":{ + "shape":"InvestigationGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the investigation group.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The ARN of the IAM role that the investigation group uses for permissions to gather data.

    " + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

    Specifies the customer managed KMS key that the investigation group uses to encrypt data, if there is one. If not, the investigation group uses an Amazon Web Services key to encrypt the data.

    " + }, + "retentionInDays":{ + "shape":"Retention", + "documentation":"

    Specifies how long that investigation data is kept.

    " + }, + "chatbotNotificationChannel":{ + "shape":"ChatbotNotificationChannel", + "documentation":"

    This structure is a string array. The first string is the ARN of a Amazon SNS topic. The array of strings display the ARNs of Amazon Q in chat applications configurations that are associated with that topic. For more information about these configuration ARNs, see Getting started with Amazon Q in chat applications and Resource type defined by Amazon Web Services Chatbot.

    " + }, + "tagKeyBoundaries":{ + "shape":"TagKeyBoundaries", + "documentation":"

    Displays the custom tag keys for custom applications in your system that you have specified in the investigation group. Resource tags help Amazon Q narrow the search space when it is unable to discover definite relationships between resources.

    " + }, + "isCloudTrailEventHistoryEnabled":{ + "shape":"Boolean", + "documentation":"

    Specifies whether CloudWatch investigations has access to change events that are recorded by CloudTrail.

    " + }, + "crossAccountConfigurations":{ + "shape":"CrossAccountConfigurations", + "documentation":"

    Lists the AWSAccountId of the accounts configured for cross-account access and the results of the last scan performed on each account.

    " + } + } + }, + "IdentifierStringWithPatternAndLengthLimits":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\-_\\/A-Za-z0-9:\\.]+" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    An internal server error occurred. You can try again later.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvestigationGroupArn":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):aiops:[a-zA-Z0-9-]*:[0-9]{12}:investigation-group\\/[A-Za-z0-9]{16}" + }, + "InvestigationGroupIdentifier":{ + "type":"string", + "pattern":"(?:[\\-_A-Za-z0-9]{1,512}|arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):aiops:[a-zA-Z0-9-]*:[0-9]{12}:investigation-group\\/[A-Za-z0-9]{16})" + }, + "InvestigationGroupPolicyDocument":{ + "type":"string", + "max":32768, + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" + }, + "InvestigationGroups":{ + "type":"list", + "member":{"shape":"ListInvestigationGroupsModel"} + }, + "KmsKeyId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"arn:.*" + }, + "ListInvestigationGroupsInput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"SensitiveStringWithLengthLimits", + "documentation":"

    Include this value, if it was returned by the previous operation, to get the next set of service operations.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListInvestigationGroupsInputMaxResultsInteger", + "documentation":"

    The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListInvestigationGroupsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "ListInvestigationGroupsModel":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"InvestigationGroupArn", + "documentation":"

    The Amazon Resource Name (ARN) of the investigation group.

    " + }, + "name":{ + "shape":"StringWithPatternAndLengthLimits", + "documentation":"

    The name of the investigation group.

    " + } + }, + "documentation":"

    This structure contains information about one investigation group in the account.

    " + }, + "ListInvestigationGroupsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"SensitiveStringWithLengthLimits", + "documentation":"

    Include this value in your next use of this operation to get the next set of service operations.

    " + }, + "investigationGroups":{ + "shape":"InvestigationGroups", + "documentation":"

    An array of structures, where each structure contains the information about one investigation group in the account.

    " + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

    The list of tag keys and values associated with the resource you specified.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The ARN of the CloudWatch investigations resource that you want to view tags for. You can use the ListInvestigationGroups operation to find the ARNs of investigation groups.

    The ARN format for an investigation group is arn:aws:aiops:Region:account-id:investigation-group:investigation-group-id .

    ", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "PutInvestigationGroupPolicyRequest":{ + "type":"structure", + "required":[ + "identifier", + "policy" + ], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to assign the policy to.

    ", + "location":"uri", + "locationName":"identifier" + }, + "policy":{ + "shape":"InvestigationGroupPolicyDocument", + "documentation":"

    The policy, in JSON format.

    " + } + } + }, + "PutInvestigationGroupPolicyResponse":{ + "type":"structure", + "members":{ + "investigationGroupArn":{ + "shape":"InvestigationGroupArn", + "documentation":"

    The ARN of the investigation group that will use this policy.

    " + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The specified resource doesn't exist.

    ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "Retention":{ + "type":"long", + "box":true, + "max":90, + "min":7 + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.*" + }, + "SNSTopicArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.*" + }, + "SensitiveStringWithLengthLimits":{ + "type":"string", + "max":2048, + "min":0, + "sensitive":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

    The resource that caused the quota exception.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of resource that caused the quota exception.

    " + }, + "serviceCode":{ + "shape":"String", + "documentation":"

    The name of the service associated with the error.

    " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

    The quota that was exceeded.

    " + } + }, + "documentation":"

    This request exceeds a service quota.

    ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "StringWithPatternAndLengthLimits":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\-_A-Za-z0-9\\[\\]\\(\\)\\{\\}\\.: ]+" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)" + }, + "TagKeyBoundaries":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to apply the tags to. You can use the ListInvestigationGroups operation to find the ARNs of investigation groups.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

    The list of key-value pairs to associate with the resource.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request was throttled because of quota limits. You can try again later.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the resource that you want to remove the tags from. You can use the ListInvestigationGroups operation to find the ARNs of investigation groups.

    ", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    The list of tag keys to remove from the resource.

    ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateInvestigationGroupOutput":{ + "type":"structure", + "members":{ + } + }, + "UpdateInvestigationGroupRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"InvestigationGroupIdentifier", + "documentation":"

    Specify either the name or the ARN of the investigation group that you want to modify.

    ", + "location":"uri", + "locationName":"identifier" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    Specify this field if you want to change the IAM role that CloudWatch investigations will use when it gathers investigation data. To do so, specify the ARN of the new role.

    The permissions in this role determine which of your resources that CloudWatch investigations will have access to during investigations.

    For more information, see How to control what data Amazon Q has access to during investigations.

    " + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

    Use this structure if you want to use a customer managed KMS key to encrypt your investigation data. If you omit this parameter, CloudWatch investigations will use an Amazon Web Services key to encrypt the data. For more information, see Encryption of investigation data.

    " + }, + "tagKeyBoundaries":{ + "shape":"TagKeyBoundaries", + "documentation":"

    Enter the existing custom tag keys for custom applications in your system. Resource tags help Amazon Q narrow the search space when it is unable to discover definite relationships between resources. For example, to discover that an Amazon ECS service depends on an Amazon RDS database, Amazon Q can discover this relationship using data sources such as X-Ray and CloudWatch Application Signals. However, if you haven't deployed these features, Amazon Q will attempt to identify possible relationships. Tag boundaries can be used to narrow the resources that will be discovered by Amazon Q in these cases.

    You don't need to enter tags created by myApplications or CloudFormation, because Amazon Q can automatically detect those tags.

    " + }, + "chatbotNotificationChannel":{ + "shape":"ChatbotNotificationChannel", + "documentation":"

    Use this structure to integrate CloudWatch investigations with Amazon Q in chat applications. This structure is a string array. For the first string, specify the ARN of an Amazon SNS topic. For the array of strings, specify the ARNs of one or more Amazon Q in chat applications configurations that you want to associate with that topic. For more information about these configuration ARNs, see Getting started with Amazon Q in chat applications and Resource type defined by Amazon Web Services Chatbot.

    " + }, + "isCloudTrailEventHistoryEnabled":{ + "shape":"Boolean", + "documentation":"

    Specify true to enable CloudWatch investigations to have access to change events that are recorded by CloudTrail. The default is true.

    " + }, + "crossAccountConfigurations":{ + "shape":"CrossAccountConfigurations", + "documentation":"

    Used to configure cross-account access for an investigation group. It allows the investigation group to access resources in other accounts.

    " + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    This operation or its parameters aren't formatted correctly.

    ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

    The CloudWatch investigations feature is a generative AI-powered assistant that can help you respond to incidents in your system. It uses generative AI to scan your system's telemetry and quickly surface suggestions that might be related to your issue. These suggestions include metrics, logs, deployment events, and root-cause hypotheses.

    You can use API actions to create, manage, and delete investigation groups and investigation group policies. To start and manage investigations, you must use the CloudWatch console.

    " +} diff --git a/services/aiops/src/main/resources/codegen-resources/waiters-2.json b/services/aiops/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/aiops/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/amp/pom.xml b/services/amp/pom.xml index 49dc24220dd4..2d8cf3251eb5 100644 --- a/services/amp/pom.xml +++ b/services/amp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT amp AWS Java SDK :: Services :: Amp diff --git a/services/amp/src/main/resources/codegen-resources/customization.config b/services/amp/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/amp/src/main/resources/codegen-resources/customization.config +++ b/services/amp/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amp/src/main/resources/codegen-resources/service-2.json b/services/amp/src/main/resources/codegen-resources/service-2.json index 0496e3d3bb80..041ea180f346 100644 --- a/services/amp/src/main/resources/codegen-resources/service-2.json +++ b/services/amp/src/main/resources/codegen-resources/service-2.json @@ -50,7 +50,25 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    The CreateLoggingConfiguration operation creates a logging configuration for the workspace. Use this operation to set the CloudWatch log group to which the logs will be published to.

    ", + "documentation":"

    The CreateLoggingConfiguration operation creates rules and alerting logging configuration for the workspace. Use this operation to set the CloudWatch log group to which the logs will be published to.

    These logging configurations are only for rules and alerting logs.

    ", + "idempotent":true + }, + "CreateQueryLoggingConfiguration":{ + "name":"CreateQueryLoggingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/logging/query", + "responseCode":202 + }, + "input":{"shape":"CreateQueryLoggingConfigurationRequest"}, + "output":{"shape":"CreateQueryLoggingConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates a query logging configuration for the specified workspace. This operation enables logging of queries that exceed the specified QSP threshold.

    ", "idempotent":true }, "CreateRuleGroupsNamespace":{ @@ -92,7 +110,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more.

    An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide.

    The scrapeConfiguration parameter contains the base-64 encoded YAML configuration for the scraper.

    For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.

    ", + "documentation":"

    The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more.

    An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide.

    The scrapeConfiguration parameter contains the base-64 encoded YAML configuration for the scraper.

    When creating a scraper, the service creates a Network Interface in each Availability Zone that are passed into CreateScraper through subnets. These network interfaces are used to connect to the Amazon EKS cluster within the VPC for scraping metrics.

    For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.

    ", "idempotent":true }, "CreateWorkspace":{ @@ -149,7 +167,25 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Deletes the logging configuration for a workspace.

    ", + "documentation":"

    Deletes the rules and alerting logging configuration for a workspace.

    These logging configurations are only for rules and alerting logs.

    ", + "idempotent":true + }, + "DeleteQueryLoggingConfiguration":{ + "name":"DeleteQueryLoggingConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}/logging/query", + "responseCode":202 + }, + "input":{"shape":"DeleteQueryLoggingConfigurationRequest"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the query logging configuration for the specified workspace.

    ", "idempotent":true }, "DeleteRuleGroupsNamespace":{ @@ -243,7 +279,24 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns complete information about the current logging configuration of the workspace.

    " + "documentation":"

    Returns complete information about the current rules and alerting logging configuration of the workspace.

    These logging configurations are only for rules and alerting logs.

    " + }, + "DescribeQueryLoggingConfiguration":{ + "name":"DescribeQueryLoggingConfiguration", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/logging/query", + "responseCode":200 + }, + "input":{"shape":"DescribeQueryLoggingConfigurationRequest"}, + "output":{"shape":"DescribeQueryLoggingConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Retrieves the details of the query logging configuration for the specified workspace.

    " }, "DescribeRuleGroupsNamespace":{ "name":"DescribeRuleGroupsNamespace", @@ -498,7 +551,26 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Updates the log group ARN or the workspace ID of the current logging configuration.

    ", + "documentation":"

    Updates the log group ARN or the workspace ID of the current rules and alerting logging configuration.

    These logging configurations are only for rules and alerting logs.

    ", + "idempotent":true + }, + "UpdateQueryLoggingConfiguration":{ + "name":"UpdateQueryLoggingConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/workspaces/{workspaceId}/logging/query", + "responseCode":202 + }, + "input":{"shape":"UpdateQueryLoggingConfigurationRequest"}, + "output":{"shape":"UpdateQueryLoggingConfigurationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the query logging configuration for the specified workspace.

    ", "idempotent":true }, "UpdateScraper":{ @@ -652,6 +724,17 @@ "documentation":"

    The AmpConfiguration structure defines the Amazon Managed Service for Prometheus instance a scraper should send metrics to.

    " }, "Blob":{"type":"blob"}, + "CloudWatchLogDestination":{ + "type":"structure", + "required":["logGroupArn"], + "members":{ + "logGroupArn":{ + "shape":"LogGroupArn", + "documentation":"

    The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation.

    " + } + }, + "documentation":"

    Configuration details for logging to CloudWatch Logs.

    " + }, "ClusterArn":{ "type":"string", "documentation":"

    The ARN of an EKS cluster.

    ", @@ -757,6 +840,40 @@ }, "documentation":"

    Represents the output of a CreateLoggingConfiguration operation.

    " }, + "CreateQueryLoggingConfigurationRequest":{ + "type":"structure", + "required":[ + "destinations", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    (Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "destinations":{ + "shape":"LoggingDestinations", + "documentation":"

    The destinations where query logs will be sent. Only CloudWatch Logs destination is supported. The list must contain exactly one element.

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace for which to create the query logging configuration.

    ", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "CreateQueryLoggingConfigurationResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"QueryLoggingConfigurationStatus", + "documentation":"

    The current status of the query logging configuration.

    " + } + } + }, "CreateRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ @@ -979,6 +1096,25 @@ }, "documentation":"

    Represents the input of a DeleteLoggingConfiguration operation.

    " }, + "DeleteQueryLoggingConfigurationRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    (Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace from which to delete the query logging configuration.

    ", + "location":"uri", + "locationName":"workspaceId" + } + } + }, "DeleteRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ @@ -1114,6 +1250,28 @@ }, "documentation":"

    Represents the output of a DescribeLoggingConfiguration operation.

    " }, + "DescribeQueryLoggingConfigurationRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace for which to retrieve the query logging configuration.

    ", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "DescribeQueryLoggingConfigurationResponse":{ + "type":"structure", + "required":["queryLoggingConfiguration"], + "members":{ + "queryLoggingConfiguration":{ + "shape":"QueryLoggingConfigurationMetadata", + "documentation":"

    The detailed information about the query logging configuration for the specified workspace.

    " + } + } + }, "DescribeRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ @@ -1353,14 +1511,14 @@ "members":{ "labelSet":{ "shape":"LabelSet", - "documentation":"

    This defines one label set that will have an enforced ingestion limit.

    Label values accept ASCII characters and must contain at least one character that isn't whitespace. ASCII control characters are not accepted. If the label name is metric name label __name__, then the metric part of the name must conform to the following pattern: [a-zA-Z_:][a-zA-Z0-9_:]*

    " + "documentation":"

    This defines one label set that will have an enforced active time series limit.

    Label values accept ASCII characters and must contain at least one character that isn't whitespace. ASCII control characters are not accepted. If the label name is metric name label __name__, then the metric part of the name must conform to the following pattern: [a-zA-Z_:][a-zA-Z0-9_:]*

    " }, "limits":{ "shape":"LimitsPerLabelSetEntry", "documentation":"

    This structure contains the information about the limits that apply to time series that match this label set.

    " } }, - "documentation":"

    This structure defines one label set used to enforce ingestion limits for the workspace, and defines the limit for that label set.

    A label set is a unique combination of label-value pairs. Use them to control time series ingestion limits and to monitor usage by specific label groups. Example label sets might be team:finance or env:prod

    " + "documentation":"

    This structure defines one label set used to enforce active time series limits for the workspace, and defines the limit for that label set.

    A label set is a unique combination of label-value pairs. Use them to control time series limits and to monitor usage by specific label groups. Example label sets might be team:finance or env:prod

    " }, "LimitsPerLabelSetEntry":{ "type":"structure", @@ -1578,7 +1736,7 @@ "documentation":"

    The ID of the workspace the logging configuration is for.

    " } }, - "documentation":"

    Contains information about the logging configuration for the workspace.

    " + "documentation":"

    Contains information about the current rules and alerting logging configuration for the workspace.

    These logging configurations are only for rules and alerting logs.

    " }, "LoggingConfigurationStatus":{ "type":"structure", @@ -1586,7 +1744,7 @@ "members":{ "statusCode":{ "shape":"LoggingConfigurationStatusCode", - "documentation":"

    The current status of the logging configuration.

    " + "documentation":"

    The current status of the current rules and alerting logging configuration.

    These logging configurations are only for rules and alerting logs.

    " }, "statusReason":{ "shape":"String", @@ -1607,6 +1765,47 @@ "UPDATE_FAILED" ] }, + "LoggingDestination":{ + "type":"structure", + "required":[ + "cloudWatchLogs", + "filters" + ], + "members":{ + "cloudWatchLogs":{ + "shape":"CloudWatchLogDestination", + "documentation":"

    Configuration details for logging to CloudWatch Logs.

    " + }, + "filters":{ + "shape":"LoggingFilter", + "documentation":"

    Filtering criteria that determine which queries are logged.

    " + } + }, + "documentation":"

    Defines a destination and its associated filtering criteria for query logging.

    " + }, + "LoggingDestinations":{ + "type":"list", + "member":{"shape":"LoggingDestination"}, + "documentation":"

    A list structure that contains a single CloudWatch Logs destination.

    ", + "max":1, + "min":1 + }, + "LoggingFilter":{ + "type":"structure", + "required":["qspThreshold"], + "members":{ + "qspThreshold":{ + "shape":"LoggingFilterQspThresholdLong", + "documentation":"

    The Query Samples Processed (QSP) threshold above which queries will be logged. Queries processing more samples than this threshold will be captured in logs.

    " + } + }, + "documentation":"

    Filtering criteria that determine which queries are logged.

    " + }, + "LoggingFilterQspThresholdLong":{ + "type":"long", + "box":true, + "min":0 + }, "PaginationToken":{ "type":"string", "documentation":"

    A token used to access the next page in a paginated result set.

    ", @@ -1708,6 +1907,65 @@ }, "documentation":"

    Represents the output of a PutRuleGroupsNamespace operation.

    " }, + "QueryLoggingConfigurationMetadata":{ + "type":"structure", + "required":[ + "createdAt", + "destinations", + "modifiedAt", + "status", + "workspace" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the query logging configuration was created.

    " + }, + "destinations":{ + "shape":"LoggingDestinations", + "documentation":"

    The configured destinations for the query logging configuration.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time when the query logging configuration was last modified.

    " + }, + "status":{ + "shape":"QueryLoggingConfigurationStatus", + "documentation":"

    The current status of the query logging configuration.

    " + }, + "workspace":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace associated with this query logging configuration.

    " + } + }, + "documentation":"

    The metadata for a query logging configuration.

    " + }, + "QueryLoggingConfigurationStatus":{ + "type":"structure", + "required":["statusCode"], + "members":{ + "statusCode":{ + "shape":"QueryLoggingConfigurationStatusCode", + "documentation":"

    The current status of the query logging configuration.

    " + }, + "statusReason":{ + "shape":"String", + "documentation":"

    If there is a failure, the reason for the failure.

    " + } + }, + "documentation":"

    The status information for a query logging configuration.

    " + }, + "QueryLoggingConfigurationStatusCode":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATION_FAILED", + "UPDATE_FAILED" + ] + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -2313,6 +2571,40 @@ }, "documentation":"

    Represents the output of an UpdateLoggingConfiguration operation.

    " }, + "UpdateQueryLoggingConfigurationRequest":{ + "type":"structure", + "required":[ + "destinations", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

    (Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

    ", + "idempotencyToken":true + }, + "destinations":{ + "shape":"LoggingDestinations", + "documentation":"

    The destinations where query logs will be sent. Only CloudWatch Logs destination is supported. The list must contain exactly one element.

    " + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

    The ID of the workspace for which to update the query logging configuration.

    ", + "location":"uri", + "locationName":"workspaceId" + } + } + }, + "UpdateQueryLoggingConfigurationResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"QueryLoggingConfigurationStatus", + "documentation":"

    The current status of the query logging configuration.

    " + } + } + }, "UpdateScraperRequest":{ "type":"structure", "required":["scraperId"], @@ -2405,7 +2697,7 @@ }, "limitsPerLabelSet":{ "shape":"LimitsPerLabelSetList", - "documentation":"

    This is an array of structures, where each structure defines a label set for the workspace, and defines the ingestion limit for active time series for each of those label sets. Each label name in a label set must be unique.

    " + "documentation":"

    This is an array of structures, where each structure defines a label set for the workspace, and defines the active time series limit for each of those label sets. Each label name in a label set must be unique.

    " }, "retentionPeriodInDays":{ "shape":"UpdateWorkspaceConfigurationRequestRetentionPeriodInDaysInteger", diff --git a/services/amplify/pom.xml b/services/amplify/pom.xml index 6e1fb502d2c2..3bb5137fa490 100644 --- a/services/amplify/pom.xml +++ b/services/amplify/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT amplify AWS Java SDK :: Services :: Amplify diff --git a/services/amplify/src/main/resources/codegen-resources/customization.config b/services/amplify/src/main/resources/codegen-resources/customization.config index 96596b19f621..1a37c2370c98 100644 --- a/services/amplify/src/main/resources/codegen-resources/customization.config +++ b/services/amplify/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listApps" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amplify/src/main/resources/codegen-resources/service-2.json b/services/amplify/src/main/resources/codegen-resources/service-2.json index a2d51e374032..e300f0afbf98 100644 --- a/services/amplify/src/main/resources/codegen-resources/service-2.json +++ b/services/amplify/src/main/resources/codegen-resources/service-2.json @@ -764,6 +764,10 @@ "wafConfiguration":{ "shape":"WafConfiguration", "documentation":"

    Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration with WAF.

    " + }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"

    The configuration details that apply to the jobs for an Amplify app.

    " } }, "documentation":"

    Represents the different branches of a repository for building, deploying, and hosting an Amplify app.

    " @@ -1130,6 +1134,14 @@ "member":{"shape":"Branch"}, "max":255 }, + "BuildComputeType":{ + "type":"string", + "enum":[ + "STANDARD_8GB", + "LARGE_16GB", + "XLARGE_72GB" + ] + }, "BuildSpec":{ "type":"string", "documentation":"

    The build specification (build spec) file for an Amplify app build.

    ", @@ -1144,7 +1156,7 @@ "members":{ "type":{ "shape":"CacheConfigType", - "documentation":"

    The type of cache configuration to use for an Amplify app.

    The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting.

    The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key.

    " + "documentation":"

    The type of cache configuration to use for an Amplify app.

    The AMPLIFY_MANAGED cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules.

    The AMPLIFY_MANAGED_NO_COOKIES cache configuration type is the same as AMPLIFY_MANAGED, except that it excludes all cookies from the cache key. This is the default setting.

    " } }, "documentation":"

    Describes the cache configuration for an Amplify app.

    For more information about how Amplify applies an optimal cache configuration for your app based on the type of content that is being served, see Managing cache configuration in the Amplify User Guide.

    " @@ -1316,6 +1328,10 @@ "shape":"AutoBranchCreationConfig", "documentation":"

    The automated branch creation configuration for an Amplify app.

    " }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"

    Describes the configuration details that apply to the jobs for an Amplify app.

    " + }, "cacheConfig":{ "shape":"CacheConfig", "documentation":"

    The cache configuration for the Amplify app.

    " @@ -2292,6 +2308,17 @@ "type":"string", "max":1000 }, + "JobConfig":{ + "type":"structure", + "required":["buildComputeType"], + "members":{ + "buildComputeType":{ + "shape":"BuildComputeType", + "documentation":"

    Specifies the size of the build instance. Amplify supports three instance sizes: STANDARD_8GB, LARGE_16GB, and XLARGE_72GB. If you don't specify a value, Amplify uses the STANDARD_8GB default.

    The following list describes the CPU, memory, and storage capacity for each build instance type:

    STANDARD_8GB
    • vCPUs: 4

    • Memory: 8 GiB

    • Disk space: 128 GB

    LARGE_16GB
    • vCPUs: 8

    • Memory: 16 GiB

    • Disk space: 128 GB

    XLARGE_72GB
    • vCPUs: 36

    • Memory: 72 GiB

    • Disk space: 256 GB

    " + } + }, + "documentation":"

    Describes the configuration details that apply to the jobs for an Amplify app.

    Use JobConfig to apply configuration to jobs, such as customizing the build instance size when you create or update an Amplify app. For more information about customizable build instances, see Custom build instances in the Amplify User Guide.

    " + }, "JobId":{ "type":"string", "max":255, @@ -3207,8 +3234,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response for the tag resource request.

    " }, "TagValue":{ @@ -3276,8 +3302,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response for the untag resource request.

    " }, "UpdateAppRequest":{ @@ -3366,6 +3391,10 @@ "shape":"AccessToken", "documentation":"

    The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.

    Use accessToken for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use oauthToken.

    You must specify either accessToken or oauthToken when you update an app.

    Existing Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see Migrating an existing OAuth app to the Amplify GitHub App in the Amplify User Guide .

    " }, + "jobConfig":{ + "shape":"JobConfig", + "documentation":"

    Describes the configuration details that apply to the jobs for an Amplify app.

    " + }, "cacheConfig":{ "shape":"CacheConfig", "documentation":"

    The cache configuration for the Amplify app.

    " diff --git a/services/amplifybackend/pom.xml b/services/amplifybackend/pom.xml index 6467c2d1f716..f0c788385ca5 100644 --- a/services/amplifybackend/pom.xml +++ b/services/amplifybackend/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT amplifybackend AWS Java SDK :: Services :: Amplify Backend diff --git a/services/amplifybackend/src/main/resources/codegen-resources/customization.config b/services/amplifybackend/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/amplifybackend/src/main/resources/codegen-resources/customization.config +++ b/services/amplifybackend/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/amplifyuibuilder/pom.xml b/services/amplifyuibuilder/pom.xml index 5e7485f70236..71d9cb1857df 100644 --- a/services/amplifyuibuilder/pom.xml +++ b/services/amplifyuibuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT amplifyuibuilder AWS Java SDK :: Services :: Amplify UI Builder diff --git a/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config b/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config +++ b/services/amplifyuibuilder/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apigateway/pom.xml b/services/apigateway/pom.xml index 59092a8723b6..f2a5e481fe4c 100644 --- a/services/apigateway/pom.xml +++ b/services/apigateway/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apigateway AWS Java SDK :: Services :: Amazon API Gateway diff --git a/services/apigateway/src/main/resources/codegen-resources/customization.config b/services/apigateway/src/main/resources/codegen-resources/customization.config index 6223dc34d3f7..082dea59b374 100644 --- a/services/apigateway/src/main/resources/codegen-resources/customization.config +++ b/services/apigateway/src/main/resources/codegen-resources/customization.config @@ -24,6 +24,5 @@ "interceptors": [ "software.amazon.awssdk.services.apigateway.internal.AcceptJsonInterceptor" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apigateway/src/main/resources/codegen-resources/service-2.json b/services/apigateway/src/main/resources/codegen-resources/service-2.json index 93c7f5bc0e72..67b4e67e4637 100644 --- a/services/apigateway/src/main/resources/codegen-resources/service-2.json +++ b/services/apigateway/src/main/resources/codegen-resources/service-2.json @@ -2825,6 +2825,10 @@ "policy":{ "shape":"String", "documentation":"

    A stringified JSON policy document that applies to the execute-api service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names.

    " + }, + "routingMode":{ + "shape":"RoutingMode", + "documentation":"

    The routing mode for this domain name. The routing mode determines how API Gateway sends traffic from your custom domain name to your private APIs.

    " } }, "documentation":"

    A request to create a new domain name.

    " @@ -3765,7 +3769,7 @@ }, "domainNameArn":{ "shape":"String", - "documentation":"

    The ARN of the domain name. Supported only for private custom domain names.

    " + "documentation":"

    The ARN of the domain name.

    " }, "certificateName":{ "shape":"String", @@ -3838,6 +3842,10 @@ "policy":{ "shape":"String", "documentation":"

    A stringified JSON policy document that applies to the execute-api service for this DomainName regardless of the caller and Method configuration. Supported only for private custom domain names.

    " + }, + "routingMode":{ + "shape":"RoutingMode", + "documentation":"

    The routing mode for this domain name. The routing mode determines how API Gateway sends traffic from your custom domain name to your private APIs.

    " } }, "documentation":"

    Represents a custom domain name as a user-friendly host name of an API (RestApi).

    " @@ -4076,8 +4084,7 @@ }, "GetAccountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Requests API Gateway to get information about the current Account resource.

    " }, "GetApiKeyRequest":{ @@ -5366,7 +5373,7 @@ }, "timeoutInMillis":{ "shape":"Integer", - "documentation":"

    Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds.

    " + "documentation":"

    Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds. You can increase the default value to longer than 29 seconds for Regional or private APIs only.

    " }, "cacheNamespace":{ "shape":"String", @@ -5976,7 +5983,7 @@ }, "timeoutInMillis":{ "shape":"NullableInteger", - "documentation":"

    Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds.

    " + "documentation":"

    Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds or 29 seconds. You can increase the default value to longer than 29 seconds for Regional or private APIs only.

    " }, "tlsConfig":{"shape":"TlsConfig"} }, @@ -6385,6 +6392,14 @@ }, "documentation":"

    Contains references to your APIs and links that guide you in how to interact with your collection. A collection offers a paginated view of your APIs.

    " }, + "RoutingMode":{ + "type":"string", + "enum":[ + "BASE_PATH_MAPPING_ONLY", + "ROUTING_RULE_ONLY", + "ROUTING_RULE_THEN_BASE_PATH_MAPPING" + ] + }, "SdkConfigurationProperty":{ "type":"structure", "members":{ diff --git a/services/apigatewaymanagementapi/pom.xml b/services/apigatewaymanagementapi/pom.xml index b51e91bf88c0..e979576c11ad 100644 --- a/services/apigatewaymanagementapi/pom.xml +++ b/services/apigatewaymanagementapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apigatewaymanagementapi AWS Java SDK :: Services :: ApiGatewayManagementApi diff --git a/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config b/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config +++ b/services/apigatewaymanagementapi/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apigatewayv2/pom.xml b/services/apigatewayv2/pom.xml index 33df9fc1a995..8c65ece3579e 100644 --- a/services/apigatewayv2/pom.xml +++ b/services/apigatewayv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apigatewayv2 AWS Java SDK :: Services :: ApiGatewayV2 diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/customization.config b/services/apigatewayv2/src/main/resources/codegen-resources/customization.config index 7ced6efb920b..11eb6b271d1f 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/customization.config +++ b/services/apigatewayv2/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "getApis", "getDomainNames" ], - "enableGenerateCompiledEndpointRules": true, 
- "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json b/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json index f3b7195d8e13..52644aaef144 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination" : { } + "pagination": { + "ListRoutingRules": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RoutingRules" + } + } } \ No newline at end of file diff --git a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json index cce75eb08107..fa0e91c1b3f0 100644 --- a/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json +++ b/services/apigatewayv2/src/main/resources/codegen-resources/service-2.json @@ -358,6 +358,40 @@ ], "documentation": "

    Creates a RouteResponse for a Route.

    " }, + "CreateRoutingRule": { + "name": "CreateRoutingRule", + "documentation": "

    Creates a RoutingRule.

    ", + "http": { + "method": "POST", + "requestUri": "/v2/domainnames/{domainName}/routingrules", + "responseCode": 201 + }, + "input": { + "shape": "CreateRoutingRuleRequest" + }, + "output": { + "shape": "CreateRoutingRuleResponse", + "documentation": "

    The request has succeeded and has resulted in the creation of a resource.

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    The resource specified in the request was not found.

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    The client is sending more than the allowed number of requests per unit of time.

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    One of the parameters in the request is invalid.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The resource already exists.

    " + } + ] + }, "CreateStage": { "name": "CreateStage", "http": { @@ -730,6 +764,33 @@ ], "documentation": "

    Deletes the RouteSettings for a stage.

    " }, + "DeleteRoutingRule": { + "name": "DeleteRoutingRule", + "documentation": "

    Deletes a routing rule.

    ", + "http": { + "method": "DELETE", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRoutingRuleRequest" + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    The resource specified in the request was not found.

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    The client is sending more than the allowed number of requests per unit of time.

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    One of the parameters in the request is invalid.

    " + } + ], + "idempotent": true + }, "DeleteStage": { "name": "DeleteStage", "http": { @@ -1419,6 +1480,66 @@ ], "documentation": "

    Gets the Routes for an API.

    " }, + "GetRoutingRule": { + "name": "GetRoutingRule", + "documentation": "

    Gets a routing rule.

    ", + "http": { + "method": "GET", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 200 + }, + "input": { + "shape": "GetRoutingRuleRequest" + }, + "output": { + "shape": "GetRoutingRuleResponse", + "documentation": "

    Success

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    The resource specified in the request was not found.

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    The client is sending more than the allowed number of requests per unit of time.

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    One of the parameters in the request is invalid.

    " + } + ] + }, + "ListRoutingRules": { + "name": "ListRoutingRules", + "documentation": "

    Lists routing rules.

    ", + "http": { + "method": "GET", + "requestUri": "/v2/domainnames/{domainName}/routingrules", + "responseCode": 200 + }, + "input": { + "shape": "ListRoutingRulesRequest" + }, + "output": { + "shape": "ListRoutingRulesResponse", + "documentation": "

    Success

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    The resource specified in the request was not found.

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    The client is sending more than the allowed number of requests per unit of time.

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    One of the parameters in the request is invalid.

    " + } + ] + }, "GetStage": { "name": "GetStage", "http": { @@ -1595,6 +1716,41 @@ ], "documentation": "

    Imports an API.

    " }, + "PutRoutingRule": { + "name": "PutRoutingRule", + "documentation": "

    Updates a routing rule.

    ", + "http": { + "method": "PUT", + "requestUri": "/v2/domainnames/{domainName}/routingrules/{routingRuleId}", + "responseCode": 200 + }, + "input": { + "shape": "PutRoutingRuleRequest" + }, + "output": { + "shape": "PutRoutingRuleResponse", + "documentation": "

    Success

    " + }, + "errors": [ + { + "shape": "NotFoundException", + "documentation": "

    The resource specified in the request was not found.

    " + }, + { + "shape": "TooManyRequestsException", + "documentation": "

    The client is sending more than the allowed number of requests per unit of time.

    " + }, + { + "shape": "BadRequestException", + "documentation": "

    One of the parameters in the request is invalid.

    " + }, + { + "shape": "ConflictException", + "documentation": "

    The resource already exists.

    " + } + ], + "idempotent": true + }, "ReimportApi": { "name": "ReimportApi", "http": { @@ -3108,6 +3264,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -3137,6 +3298,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -3161,6 +3327,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -3171,6 +3341,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -3999,6 +4174,74 @@ } } }, + "CreateRoutingRuleRequest": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    Represents a routing rule action. The only supported action is invokeApi.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    Represents a condition. Conditions can contain up to two matchHeaders conditions and one matchBasePaths condition. API Gateway evaluates header conditions and base path conditions together. You can only use AND between header and base path conditions.

    " + }, + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "Represents the priority of the routing rule." + } + }, + "required": [ + "DomainName", + "Actions", + "Priority", + "Conditions" + ] + }, + "CreateRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    Represents a routing rule action. The only supported action is invokeApi.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    Represents a condition. Conditions can contain up to two matchHeaders conditions and one matchBasePaths condition. API Gateway evaluates header conditions and base path conditions together. You can only use AND between header and base path conditions.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    Represents the priority of the routing rule.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, "CreateStageInput": { "type": "structure", "members": { @@ -4610,6 +4853,33 @@ "ApiId" ] }, + "DeleteRoutingRuleRequest": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName" + ] + }, "DeleteStageRequest": { "type": "structure", "members": { @@ -4647,8 +4917,7 @@ }, "DeleteVpcLinkResponse": { "type": "structure", - "members": { - } + "members": {} }, "Deployment": { "type": "structure", @@ -4724,6 +4993,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -4734,6 +5007,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -5403,6 +5681,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -5413,6 +5695,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -6091,6 +6378,109 @@ } } }, + "GetRoutingRuleRequest": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName" + ] + }, + "GetRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The resulting action based on matching a routing rules condition. Only InvokeApi is supported.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The conditions of the routing rule.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The order in which API Gateway evaluates a rule. Priority is evaluated from the lowest value to the highest value.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, + "ListRoutingRulesRequest": { + "type": "structure", + "members": { + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

    The maximum number of elements to be returned for this resource.

    " + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

    The next page of elements from this collection. Not valid for the last element of the collection.

    " + } + }, + "required": [ + "DomainName" + ] + }, + "ListRoutingRulesResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "NextToken", + "locationName": "nextToken" + }, + "RoutingRules": { + "shape": "__listOfRoutingRule", + "locationName": "routingRules", + "documentation": "

    The routing rules.

    " + } + } + }, "GetStageRequest": { "type": "structure", "members": { @@ -6890,6 +7280,81 @@ "HTTP" ] }, + "PutRoutingRuleRequest": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The routing rule condition.

    " + }, + "DomainName": { + "shape": "__string", + "location": "uri", + "locationName": "domainName", + "documentation": "

    The domain name.

    " + }, + "DomainNameId": { + "shape": "__string", + "location": "querystring", + "locationName": "domainNameId", + "documentation": "

    The domain name ID.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleId": { + "shape": "__string", + "location": "uri", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + }, + "required": [ + "RoutingRuleId", + "DomainName", + "Actions", + "Priority", + "Conditions" + ] + }, + "PutRoutingRuleResponse": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The conditions of the routing rule.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, "ReimportApiInput": { "type": "structure", "members": { @@ -7238,6 +7703,190 @@ }, "documentation": "

    Represents a collection of routes.

    " }, + "RoutingMode": { + "type": "string", + "enum": [ + "API_MAPPING_ONLY", + "ROUTING_RULE_ONLY", + "ROUTING_RULE_THEN_API_MAPPING" + ] + }, + "RoutingRule": { + "type": "structure", + "documentation": "

    Represents a routing rule.

    ", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions", + "documentation": "

    The routing rule action.

    " + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions", + "documentation": "

    The routing rule condition.

    " + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority", + "documentation": "

    The routing rule priority.

    " + }, + "RoutingRuleArn": { + "shape": "Arn", + "locationName": "routingRuleArn", + "documentation": "

    The routing rule ARN.

    " + }, + "RoutingRuleId": { + "shape": "Id", + "locationName": "routingRuleId", + "documentation": "

    The routing rule ID.

    " + } + } + }, + "RoutingRuleAction": { + "type": "structure", + "members": { + "InvokeApi": { + "shape": "RoutingRuleActionInvokeApi", + "locationName": "invokeApi" + } + }, + "documentation": "

    The routing rule action.

    ", + "required": [ + "InvokeApi" + ] + }, + "RoutingRuleActionInvokeApi": { + "type": "structure", + "members": { + "ApiId": { + "shape": "Id", + "locationName": "apiId" + }, + "Stage": { + "shape": "StringWithLengthBetween1And128", + "locationName": "stage" + }, + "StripBasePath": { + "shape": "__boolean", + "locationName": "stripBasePath", + "documentation": "

    The strip base path setting.

    " + } + }, + "documentation": "

    Represents an InvokeApi action.

    ", + "required": [ + "Stage", + "ApiId" + ] + }, + "RoutingRuleCondition": { + "type": "structure", + "documentation": "

    Represents a routing rule condition.

    ", + "members": { + "MatchBasePaths": { + "shape": "RoutingRuleMatchBasePaths", + "locationName": "matchBasePaths", + "documentation": "

    The base path to be matched.

    " + }, + "MatchHeaders": { + "shape": "RoutingRuleMatchHeaders", + "locationName": "matchHeaders", + "documentation": "

    The headers to be matched.

    " + } + } + }, + "RoutingRuleInput": { + "type": "structure", + "members": { + "Actions": { + "shape": "__listOfRoutingRuleAction", + "locationName": "actions" + }, + "Conditions": { + "shape": "__listOfRoutingRuleCondition", + "locationName": "conditions" + }, + "Priority": { + "shape": "RoutingRulePriority", + "locationName": "priority" + } + }, + "required": [ + "Actions", + "Priority", + "Conditions" + ] + }, + "RoutingRuleMatchBasePaths": { + "type": "structure", + "members": { + "AnyOf": { + "shape": "__listOfSelectionKey", + "locationName": "anyOf", + "documentation": "The string of the case sensitive base path to be matched." + } + }, + "documentation": "

    Represents a MatchBasePaths condition.

    ", + "required": [ + "AnyOf" + ] + }, + "RoutingRuleMatchHeaderValue": { + "type": "structure", + "documentation": "

    Represents a MatchHeaderValue.

    ", + "members": { + "Header": { + "shape": "SelectionKey", + "locationName": "header" + }, + "ValueGlob": { + "shape": "SelectionExpression", + "locationName": "valueGlob" + } + }, + "required": [ + "ValueGlob", + "Header" + ] + }, + "RoutingRuleMatchHeaders": { + "type": "structure", + "documentation": "

    Represents a MatchHeaders condition.

    ", + "members": { + "AnyOf": { + "shape": "__listOfRoutingRuleMatchHeaderValue", + "locationName": "anyOf", + "documentation": "

    The header name and header value glob to be matched. The matchHeaders condition is matched if any of the header name and header value globs are matched.

    " + } + }, + "required": [ + "AnyOf" + ] + }, + "RoutingRulePriority": { + "type": "integer", + "min": 1, + "max": 1000000, + "documentation": "

    The routing rule priority.

    " + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 100 + }, + "RoutingRules": { + "type": "structure", + "documentation": "

    A collection of routing rules.

    ", + "members": { + "NextToken": { + "shape": "NextToken", + "locationName": "nextToken" + }, + "RoutingRules": { + "shape": "__listOfRoutingRule", + "locationName": "routingRules" + } + } + }, "SecurityGroupIdList": { "type": "list", "documentation": "

    A list of security group IDs for the VPC link.

    ", @@ -7442,8 +8091,7 @@ }, "TagResourceResponse": { "type": "structure", - "members": { - } + "members": {} }, "Tags": { "type": "map", @@ -8127,6 +8775,11 @@ "shape": "MutualTlsAuthenticationInput", "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " + }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " } }, "documentation": "

    Represents the input parameters for an UpdateDomainName request.

    " @@ -8149,6 +8802,11 @@ "shape": "MutualTlsAuthenticationInput", "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " + }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " } }, "documentation": "

    Updates a DomainName.

    ", @@ -8169,6 +8827,10 @@ "locationName": "domainName", "documentation": "

    The name of the DomainName resource.

    " }, + "DomainNameArn": { + "shape": "Arn", + "locationName": "domainNameArn" + }, "DomainNameConfigurations": { "shape": "DomainNameConfigurations", "locationName": "domainNameConfigurations", @@ -8179,6 +8841,11 @@ "locationName": "mutualTlsAuthentication", "documentation": "

    The mutual TLS authentication configuration for a custom domain name.

    " }, + "RoutingMode": { + "shape": "RoutingMode", + "locationName": "routingMode", + "documentation": "

    The routing mode.

    " + }, "Tags": { "shape": "Tags", "locationName": "tags", @@ -9448,6 +10115,36 @@ "shape": "RouteResponse" } }, + "__listOfRoutingRule": { + "type": "list", + "member": { + "shape": "RoutingRule" + } + }, + "__listOfRoutingRuleAction": { + "type": "list", + "member": { + "shape": "RoutingRuleAction" + } + }, + "__listOfRoutingRuleCondition": { + "type": "list", + "member": { + "shape": "RoutingRuleCondition" + } + }, + "__listOfRoutingRuleMatchHeaderValue": { + "type": "list", + "member": { + "shape": "RoutingRuleMatchHeaderValue" + } + }, + "__listOfSelectionKey": { + "type": "list", + "member": { + "shape": "SelectionKey" + } + }, "__listOfStage": { "type": "list", "member": { diff --git a/services/appconfig/pom.xml b/services/appconfig/pom.xml index 6ba193cfcf17..7243e36ba1bf 100644 --- a/services/appconfig/pom.xml +++ b/services/appconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appconfig AWS Java SDK :: Services :: AppConfig diff --git a/services/appconfig/src/main/resources/codegen-resources/customization.config b/services/appconfig/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/appconfig/src/main/resources/codegen-resources/customization.config +++ b/services/appconfig/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appconfigdata/pom.xml b/services/appconfigdata/pom.xml index 49718361be72..b900f5ecde20 100644 --- a/services/appconfigdata/pom.xml +++ b/services/appconfigdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appconfigdata AWS Java SDK :: Services :: App Config Data diff --git a/services/appconfigdata/src/main/resources/codegen-resources/customization.config 
b/services/appconfigdata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/appconfigdata/src/main/resources/codegen-resources/customization.config +++ b/services/appconfigdata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appfabric/pom.xml b/services/appfabric/pom.xml index ff5276ac8844..319919539b89 100644 --- a/services/appfabric/pom.xml +++ b/services/appfabric/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appfabric AWS Java SDK :: Services :: App Fabric diff --git a/services/appfabric/src/main/resources/codegen-resources/customization.config b/services/appfabric/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/appfabric/src/main/resources/codegen-resources/customization.config +++ b/services/appfabric/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appflow/pom.xml b/services/appflow/pom.xml index 36b6c7e90560..498cba75199d 100644 --- a/services/appflow/pom.xml +++ b/services/appflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appflow AWS Java SDK :: Services :: Appflow diff --git a/services/appflow/src/main/resources/codegen-resources/customization.config b/services/appflow/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/appflow/src/main/resources/codegen-resources/customization.config +++ b/services/appflow/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - 
"enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appintegrations/pom.xml b/services/appintegrations/pom.xml index 85382e28f106..63a59964a74d 100644 --- a/services/appintegrations/pom.xml +++ b/services/appintegrations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appintegrations AWS Java SDK :: Services :: App Integrations diff --git a/services/appintegrations/src/main/resources/codegen-resources/customization.config b/services/appintegrations/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/appintegrations/src/main/resources/codegen-resources/customization.config +++ b/services/appintegrations/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationautoscaling/pom.xml b/services/applicationautoscaling/pom.xml index eadefbfff932..981c6a1548ce 100644 --- a/services/applicationautoscaling/pom.xml +++ b/services/applicationautoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT applicationautoscaling AWS Java SDK :: Services :: AWS Application Auto Scaling diff --git a/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config b/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config +++ b/services/applicationautoscaling/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git 
a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json index 79aff80cdc22..0cabbb26245f 100644 --- a/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/applicationautoscaling/src/main/resources/codegen-resources/service-2.json @@ -359,8 +359,7 @@ }, "DeleteScalingPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteScheduledActionRequest":{ "type":"structure", @@ -391,8 +390,7 @@ }, "DeleteScheduledActionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterScalableTargetRequest":{ "type":"structure", @@ -418,8 +416,7 @@ }, "DeregisterScalableTargetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeScalableTargetsRequest":{ "type":"structure", @@ -1128,7 +1125,7 @@ "documentation":"

    A label that uniquely identifies a target group.

    " } }, - "documentation":"

    Describes a load metric for a predictive scaling policy.

    When returned in the output of DescribePolicies, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.

    " + "documentation":"

    Describes a load metric for a predictive scaling policy.

    When returned in the output of DescribePolicies, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.

    The following predefined metrics are available for predictive scaling:

    • ECSServiceAverageCPUUtilization

    • ECSServiceAverageMemoryUtilization

    • ECSServiceCPUUtilization

    • ECSServiceMemoryUtilization

    • ECSServiceTotalCPUUtilization

    • ECSServiceTotalMemoryUtilization

    • ALBRequestCount

    • ALBRequestCountPerTarget

    • TotalALBRequestCount

    " }, "PredictiveScalingPredefinedMetricPairSpecification":{ "type":"structure", @@ -1143,7 +1140,7 @@ "documentation":"

    A label that uniquely identifies a specific target group from which to determine the total and average request count.

    " } }, - "documentation":"

    Represents a metric pair for a predictive scaling policy.

    " + "documentation":"

    Represents a metric pair for a predictive scaling policy.

    The following predefined metrics are available for predictive scaling:

    • ECSServiceAverageCPUUtilization

    • ECSServiceAverageMemoryUtilization

    • ECSServiceCPUUtilization

    • ECSServiceMemoryUtilization

    • ECSServiceTotalCPUUtilization

    • ECSServiceTotalMemoryUtilization

    • ALBRequestCount

    • ALBRequestCountPerTarget

    • TotalALBRequestCount

    " }, "PredictiveScalingPredefinedScalingMetricSpecification":{ "type":"structure", @@ -1158,7 +1155,7 @@ "documentation":"

    A label that uniquely identifies a specific target group from which to determine the average request count.

    " } }, - "documentation":"

    Describes a scaling metric for a predictive scaling policy.

    When returned in the output of DescribePolicies, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.

    " + "documentation":"

    Describes a scaling metric for a predictive scaling policy.

    When returned in the output of DescribePolicies, it indicates that a predictive scaling policy uses individually specified load and scaling metrics instead of a metric pair.

    The following predefined metrics are available for predictive scaling:

    • ECSServiceAverageCPUUtilization

    • ECSServiceAverageMemoryUtilization

    • ECSServiceCPUUtilization

    • ECSServiceMemoryUtilization

    • ECSServiceTotalCPUUtilization

    • ECSServiceTotalMemoryUtilization

    • ALBRequestCount

    • ALBRequestCountPerTarget

    • TotalALBRequestCount

    " }, "PredictiveScalingSchedulingBufferTime":{ "type":"integer", @@ -1271,8 +1268,7 @@ }, "PutScheduledActionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RegisterScalableTargetRequest":{ "type":"structure", @@ -1795,8 +1791,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -1989,8 +1984,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ValidationException":{ "type":"structure", diff --git a/services/applicationcostprofiler/pom.xml b/services/applicationcostprofiler/pom.xml index 640e6727291d..9a1b9e1b324d 100644 --- a/services/applicationcostprofiler/pom.xml +++ b/services/applicationcostprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT applicationcostprofiler AWS Java SDK :: Services :: Application Cost Profiler diff --git a/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config b/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config +++ b/services/applicationcostprofiler/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationdiscovery/pom.xml b/services/applicationdiscovery/pom.xml index 6c8d0e9fe508..f9d78734e54f 100644 --- a/services/applicationdiscovery/pom.xml +++ b/services/applicationdiscovery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT applicationdiscovery AWS Java SDK :: Services :: AWS Application Discovery Service diff --git a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config 
b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config index e81d0e1aab6a..dfddca8bbd59 100644 --- a/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config +++ b/services/applicationdiscovery/src/main/resources/codegen-resources/customization.config @@ -17,6 +17,5 @@ "DescribeExportConfigurations", "ExportConfigurations" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationinsights/pom.xml b/services/applicationinsights/pom.xml index 314333b162be..ac8c0754cda7 100644 --- a/services/applicationinsights/pom.xml +++ b/services/applicationinsights/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT applicationinsights AWS Java SDK :: Services :: Application Insights diff --git a/services/applicationinsights/src/main/resources/codegen-resources/customization.config b/services/applicationinsights/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/applicationinsights/src/main/resources/codegen-resources/customization.config +++ b/services/applicationinsights/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/applicationsignals/pom.xml b/services/applicationsignals/pom.xml index ea825b05a81b..c0bedeb97a3e 100644 --- a/services/applicationsignals/pom.xml +++ b/services/applicationsignals/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT applicationsignals AWS Java SDK :: Services :: Application Signals diff --git a/services/applicationsignals/src/main/resources/codegen-resources/customization.config 
b/services/applicationsignals/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/applicationsignals/src/main/resources/codegen-resources/customization.config +++ b/services/applicationsignals/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/appmesh/pom.xml b/services/appmesh/pom.xml index 83e1faa80041..78e6e09b4720 100644 --- a/services/appmesh/pom.xml +++ b/services/appmesh/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appmesh AWS Java SDK :: Services :: App Mesh diff --git a/services/appmesh/src/main/resources/codegen-resources/customization.config b/services/appmesh/src/main/resources/codegen-resources/customization.config index 24ce7b081e76..e7aad41e84d7 100644 --- a/services/appmesh/src/main/resources/codegen-resources/customization.config +++ b/services/appmesh/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listMeshes" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/apprunner/pom.xml b/services/apprunner/pom.xml index 0ef473b3a2d1..5acc03a862c0 100644 --- a/services/apprunner/pom.xml +++ b/services/apprunner/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apprunner AWS Java SDK :: Services :: App Runner diff --git a/services/apprunner/src/main/resources/codegen-resources/customization.config b/services/apprunner/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/apprunner/src/main/resources/codegen-resources/customization.config +++ b/services/apprunner/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": 
true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appstream/pom.xml b/services/appstream/pom.xml index 073898c15477..96469598bc68 100644 --- a/services/appstream/pom.xml +++ b/services/appstream/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appstream AWS Java SDK :: Services :: Amazon AppStream diff --git a/services/appstream/src/main/resources/codegen-resources/customization.config b/services/appstream/src/main/resources/codegen-resources/customization.config index ea44408284cb..8f6624671cda 100644 --- a/services/appstream/src/main/resources/codegen-resources/customization.config +++ b/services/appstream/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ "describeImages", "describeStacks" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appsync/pom.xml b/services/appsync/pom.xml index 430bd1c32378..a8d65197c801 100644 --- a/services/appsync/pom.xml +++ b/services/appsync/pom.xml @@ -21,7 +21,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT appsync diff --git a/services/appsync/src/main/resources/codegen-resources/customization.config b/services/appsync/src/main/resources/codegen-resources/customization.config index 98c20cfa5d4e..1ab9852214b6 100644 --- a/services/appsync/src/main/resources/codegen-resources/customization.config +++ b/services/appsync/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listGraphqlApis" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/appsync/src/main/resources/codegen-resources/service-2.json b/services/appsync/src/main/resources/codegen-resources/service-2.json index 2d2ad5fce51b..723f353a0db8 100644 --- 
a/services/appsync/src/main/resources/codegen-resources/service-2.json +++ b/services/appsync/src/main/resources/codegen-resources/service-2.json @@ -1964,11 +1964,17 @@ }, "transitEncryptionEnabled":{ "shape":"Boolean", - "documentation":"

    Transit encryption flag when connecting to cache. You cannot update this setting after creation.

    " + "documentation":"

    Transit encryption flag when connecting to cache. You cannot update this setting after creation.

    ", + "deprecated":true, + "deprecatedMessage":"transitEncryptionEnabled attribute is deprecated. Encryption in transit is always enabled.", + "deprecatedSince":"5/15/2025" }, "atRestEncryptionEnabled":{ "shape":"Boolean", - "documentation":"

    At-rest encryption flag for cache. You cannot update this setting after creation.

    " + "documentation":"

    At-rest encryption flag for cache. You cannot update this setting after creation.

    ", + "deprecated":true, + "deprecatedMessage":"atRestEncryptionEnabled attribute is deprecated. Encryption at rest is always enabled.", + "deprecatedSince":"5/15/2025" }, "apiCachingBehavior":{ "shape":"ApiCachingBehavior", @@ -2690,8 +2696,7 @@ }, "DeleteApiCacheResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the output of a DeleteApiCache operation.

    " }, "DeleteApiKeyRequest":{ @@ -2717,8 +2722,7 @@ }, "DeleteApiKeyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteApiRequest":{ "type":"structure", @@ -2734,8 +2738,7 @@ }, "DeleteApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChannelNamespaceRequest":{ "type":"structure", @@ -2760,8 +2763,7 @@ }, "DeleteChannelNamespaceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataSourceRequest":{ "type":"structure", @@ -2786,8 +2788,7 @@ }, "DeleteDataSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDomainNameRequest":{ "type":"structure", @@ -2803,8 +2804,7 @@ }, "DeleteDomainNameResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFunctionRequest":{ "type":"structure", @@ -2829,8 +2829,7 @@ }, "DeleteFunctionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteGraphqlApiRequest":{ "type":"structure", @@ -2846,8 +2845,7 @@ }, "DeleteGraphqlApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResolverRequest":{ "type":"structure", @@ -2879,8 +2877,7 @@ }, "DeleteResolverResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTypeRequest":{ "type":"structure", @@ -2905,8 +2902,7 @@ }, "DeleteTypeResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeltaSyncConfig":{ "type":"structure", @@ -2946,8 +2942,7 @@ }, "DisassociateApiResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateMergedGraphqlApiRequest":{ "type":"structure", @@ -3359,8 +3354,7 @@ }, "FlushApiCacheResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the output of a FlushApiCache operation.

    " }, "FunctionConfiguration":{ @@ -5294,8 +5288,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -5379,8 +5372,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateApiCacheRequest":{ "type":"structure", diff --git a/services/apptest/pom.xml b/services/apptest/pom.xml index d48249bc21f7..d4ddeb21cda4 100644 --- a/services/apptest/pom.xml +++ b/services/apptest/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT apptest AWS Java SDK :: Services :: App Test diff --git a/services/apptest/src/main/resources/codegen-resources/customization.config b/services/apptest/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/apptest/src/main/resources/codegen-resources/customization.config +++ b/services/apptest/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/arczonalshift/pom.xml b/services/arczonalshift/pom.xml index 4113a461b230..6873c078467e 100644 --- a/services/arczonalshift/pom.xml +++ b/services/arczonalshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT arczonalshift AWS Java SDK :: Services :: ARC Zonal Shift diff --git a/services/arczonalshift/src/main/resources/codegen-resources/customization.config b/services/arczonalshift/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/arczonalshift/src/main/resources/codegen-resources/customization.config +++ b/services/arczonalshift/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git 
a/services/arczonalshift/src/main/resources/codegen-resources/service-2.json b/services/arczonalshift/src/main/resources/codegen-resources/service-2.json index cfc2ffa374c5..6156d1875361 100644 --- a/services/arczonalshift/src/main/resources/codegen-resources/service-2.json +++ b/services/arczonalshift/src/main/resources/codegen-resources/service-2.json @@ -2,18 +2,37 @@ "version":"2.0", "metadata":{ "apiVersion":"2022-10-30", + "auth":["aws.auth#sigv4"], "endpointPrefix":"arc-zonal-shift", - "jsonVersion":"1.1", "protocol":"rest-json", "protocols":["rest-json"], "serviceFullName":"AWS ARC - Zonal Shift", "serviceId":"ARC Zonal Shift", "signatureVersion":"v4", "signingName":"arc-zonal-shift", - "uid":"arc-zonal-shift-2022-10-30", - "auth":["aws.auth#sigv4"] + "uid":"arc-zonal-shift-2022-10-30" }, "operations":{ + "CancelPracticeRun":{ + "name":"CancelPracticeRun", + "http":{ + "method":"DELETE", + "requestUri":"/practiceruns/{zonalShiftId}", + "responseCode":200 + }, + "input":{"shape":"CancelPracticeRunRequest"}, + "output":{"shape":"CancelPracticeRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Cancel an in-progress practice run zonal shift in Amazon Application Recovery Controller.

    ", + "idempotent":true + }, "CancelZonalShift":{ "name":"CancelZonalShift", "http":{ @@ -28,10 +47,10 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Cancel a zonal shift in Amazon Route 53 Application Recovery Controller. To cancel the zonal shift, specify the zonal shift ID.

    A zonal shift can be one that you've started for a resource in your Amazon Web Services account in an Amazon Web Services Region, or it can be a zonal shift started by a practice run with zonal autoshift.

    " + "documentation":"

    Cancel a zonal shift in Amazon Application Recovery Controller. To cancel the zonal shift, specify the zonal shift ID.

    A zonal shift can be one that you've started for a resource in your Amazon Web Services account in an Amazon Web Services Region, or it can be a zonal shift started by a practice run with zonal autoshift.

    " }, "CreatePracticeRunConfiguration":{ "name":"CreatePracticeRunConfiguration", @@ -47,10 +66,10 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting.

    When a resource has a practice run configuration, ARC starts zonal shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.

    For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    A practice run configuration for zonal autoshift is required when you enable zonal autoshift. A practice run configuration includes specifications for blocked dates and blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting.

    When a resource has a practice run configuration, ARC starts zonal shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.

    For more information, see Considerations when you configure zonal autoshift in the Amazon Application Recovery Controller Developer Guide.

    " }, "DeletePracticeRunConfiguration":{ "name":"DeletePracticeRunConfiguration", @@ -66,8 +85,8 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

    Deletes the practice run configuration for a resource. Before you can delete a practice run configuration for a resource, you must disable zonal autoshift for the resource. Practice runs must be configured for zonal autoshift to be enabled.

    ", "idempotent":true @@ -101,10 +120,10 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Get information about a resource that's been registered for zonal shifts with Amazon Route 53 Application Recovery Controller in this Amazon Web Services Region. Resources that are registered for zonal shifts are managed resources in ARC. You can start zonal shifts and configure zonal autoshift for managed resources.

    " + "documentation":"

    Get information about a resource that's been registered for zonal shifts with Amazon Application Recovery Controller in this Amazon Web Services Region. Resources that are registered for zonal shifts are managed resources in ARC. You can start zonal shifts and configure zonal autoshift for managed resources.

    " }, "ListAutoshifts":{ "name":"ListAutoshifts", @@ -118,8 +137,8 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

    Returns the autoshifts for an Amazon Web Services Region. By default, the call returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return COMPLETED autoshifts.

    " }, @@ -135,10 +154,10 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Lists all the resources in your Amazon Web Services account in this Amazon Web Services Region that are managed for zonal shifts in Amazon Route 53 Application Recovery Controller, and information about them. The information includes the zonal autoshift status for the resource, as well as the Amazon Resource Name (ARN), the Availability Zones that each resource is deployed in, and the resource name.

    " + "documentation":"

    Lists all the resources in your Amazon Web Services account in this Amazon Web Services Region that are managed for zonal shifts in Amazon Application Recovery Controller, and information about them. The information includes the zonal autoshift status for the resource, as well as the Amazon Resource Name (ARN), the Availability Zones that each resource is deployed in, and the resource name.

    " }, "ListZonalShifts":{ "name":"ListZonalShifts", @@ -152,10 +171,29 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region.

    " + "documentation":"

    Lists all active and completed zonal shifts in Amazon Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal shifts that ARC started on your behalf for zonal autoshift.

    For more information about listing autoshifts, see ListAutoshifts.

    " + }, + "StartPracticeRun":{ + "name":"StartPracticeRun", + "http":{ + "method":"POST", + "requestUri":"/practiceruns", + "responseCode":200 + }, + "input":{"shape":"StartPracticeRunRequest"}, + "output":{"shape":"StartPracticeRunResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Start an on-demand practice run zonal shift in Amazon Application Recovery Controller. With zonal autoshift enabled, you can start an on-demand practice run to verify preparedness at any time. Amazon Web Services also runs automated practice runs about weekly when you have enabled zonal autoshift.

    For more information, see Considerations when you configure zonal autoshift in the Amazon Application Recovery Controller Developer Guide.

    " }, "StartZonalShift":{ "name":"StartZonalShift", @@ -171,10 +209,10 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    You start a zonal shift to temporarily move load balancer traffic away from an Availability Zone in an Amazon Web Services Region, to help your application recover immediately, for example, from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can start a zonal shift in ARC only for managed resources in your Amazon Web Services account in an Amazon Web Services Region. Resources are automatically registered with ARC by Amazon Web Services services.

    At this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    When you start a zonal shift, traffic for the resource is no longer routed to the Availability Zone. The zonal shift is created immediately in ARC. However, it can take a short time, typically up to a few minutes, for existing, in-progress connections in the Availability Zone to complete.

    For more information, see Zonal shift in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    You start a zonal shift to temporarily move load balancer traffic away from an Availability Zone in an Amazon Web Services Region, to help your application recover immediately, for example, from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can start a zonal shift in ARC only for managed resources in your Amazon Web Services account in an Amazon Web Services Region. Resources are automatically registered with ARC by Amazon Web Services services.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    When you start a zonal shift, traffic for the resource is no longer routed to the Availability Zone. The zonal shift is created immediately in ARC. However, it can take a short time, typically up to a few minutes, for existing, in-progress connections in the Availability Zone to complete.

    For more information, see Zonal shift in the Amazon Application Recovery Controller Developer Guide.

    " }, "UpdateAutoshiftObserverNotificationStatus":{ "name":"UpdateAutoshiftObserverNotificationStatus", @@ -188,10 +226,10 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Update the status of autoshift observer notification. Autoshift observer notification enables you to be notified, through Amazon EventBridge, when there is an autoshift event for zonal autoshift.

    If the status is ENABLED, ARC includes all autoshift events when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

    For more information, see Notifications for practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller Developer Guide.

    ", + "documentation":"

    Update the status of autoshift observer notification. Autoshift observer notification enables you to be notified, through Amazon EventBridge, when there is an autoshift event for zonal autoshift.

    If the status is ENABLED, ARC includes all autoshift events when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, ARC includes only autoshift events for autoshifts when one or more of your resources is included in the autoshift.

    For more information, see Notifications for practice runs and autoshifts in the Amazon Application Recovery Controller Developer Guide.

    ", "idempotent":true }, "UpdatePracticeRunConfiguration":{ @@ -208,8 +246,8 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

    Update a practice run configuration to change one or more of the following: add, change, or remove the blocking alarm; change the outcome alarm; or add, change, or remove blocking dates or time windows.

    " }, @@ -227,10 +265,10 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    The zonal autoshift configuration for a resource includes the practice run configuration and the status for running autoshifts, zonal autoshift status. When a resource has a practice run configuation, Route 53 ARC starts weekly zonal shifts for the resource, to shift traffic away from an Availability Zone. Weekly practice runs help you to make sure that your application can continue to operate normally with the loss of one Availability Zone.

    You can update the zonal autoshift autoshift status to enable or disable zonal autoshift. When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away resource traffic for an application from an Availability Zone during events, on your behalf, to help reduce time to recovery. Traffic is also shifted away for the required weekly practice runs.

    ", + "documentation":"

    The zonal autoshift configuration for a resource includes the practice run configuration and the status for running autoshifts, zonal autoshift status. When a resource has a practice run configuration, ARC starts weekly zonal shifts for the resource, to shift traffic away from an Availability Zone. Weekly practice runs help you to make sure that your application can continue to operate normally with the loss of one Availability Zone.

    You can update the zonal autoshift status to enable or disable zonal autoshift. When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away resource traffic for an application from an Availability Zone during events, on your behalf, to help reduce time to recovery. Traffic is also shifted away for the required weekly practice runs.

    ", "idempotent":true }, "UpdateZonalShift":{ @@ -247,10 +285,10 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], - "documentation":"

    Update an active zonal shift in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account. You can update a zonal shift to set a new expiration, or edit or replace the comment for the zonal shift.

    " + "documentation":"

    Update an active zonal shift in Amazon Application Recovery Controller in your Amazon Web Services account. You can update a zonal shift to set a new expiration, or edit or replace the comment for the zonal shift.

    " } }, "shapes":{ @@ -302,7 +340,7 @@ "members":{ "appliedStatus":{ "shape":"AutoshiftAppliedStatus", - "documentation":"

    The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

    The overall principle for precedence is that zonal shifts that you start as a customer take precedence autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

    For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

    The overall principle for precedence is that zonal shifts that you start as a customer take precedence over autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

    For more information, see How zonal autoshift and practice runs work in the Amazon Application Recovery Controller Developer Guide.

    " }, "awayFrom":{ "shape":"AvailabilityZone", @@ -313,7 +351,7 @@ "documentation":"

    The time (UTC) when the autoshift started.

    " } }, - "documentation":"

    A complex structure that lists an autoshift that is currently active for a managed resource and information about the autoshift.

    For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    A complex structure that lists an autoshift that is currently active for a managed resource and information about the autoshift.

    For more information, see How zonal autoshift and practice runs work in the Amazon Application Recovery Controller Developer Guide.

    " }, "AutoshiftObserverNotificationStatus":{ "type":"string", @@ -351,7 +389,7 @@ "documentation":"

    The status for an autoshift.

    " } }, - "documentation":"

    Information about an autoshift. Amazon Web Services starts an autoshift to temporarily move traffic for a resource away from an Availability Zone in an Amazon Web Services Region when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers. You can configure zonal autoshift in ARC for managed resources in your Amazon Web Services account in a Region. Supported Amazon Web Services resources are automatically registered with ARC.

    Autoshifts are temporary. When the Availability Zone recovers, Amazon Web Services ends the autoshift, and traffic for the resource is no longer directed to the other Availability Zones in the Region.

    " + "documentation":"

    Information about an autoshift. Amazon Web Services starts an autoshift to temporarily move traffic for a resource away from an Availability Zone in an Amazon Web Services Region when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers. You can configure zonal autoshift in ARC for managed resources in your Amazon Web Services account in a Region. Supported Amazon Web Services resources are automatically registered with ARC.

    Autoshifts are temporary. When the Availability Zone recovers, Amazon Web Services ends the autoshift, and traffic for the resource is no longer directed to the other Availability Zones in the Region.

    You can stop an autoshift for a resource by disabling zonal autoshift.

    " }, "AutoshiftsInResource":{ "type":"list", @@ -370,7 +408,7 @@ "type":"string", "max":10, "min":10, - "pattern":"^[0-9]{4}-[0-9]{2}-[0-9]{2}$" + "pattern":"[0-9]{4}-[0-9]{2}-[0-9]{2}" }, "BlockedDates":{ "type":"list", @@ -382,7 +420,7 @@ "type":"string", "max":19, "min":19, - "pattern":"^(Mon|Tue|Wed|Thu|Fri|Sat|Sun):[0-9]{2}:[0-9]{2}-(Mon|Tue|Wed|Thu|Fri|Sat|Sun):[0-9]{2}:[0-9]{2}$" + "pattern":"(Mon|Tue|Wed|Thu|Fri|Sat|Sun):[0-9]{2}:[0-9]{2}-(Mon|Tue|Wed|Thu|Fri|Sat|Sun):[0-9]{2}:[0-9]{2}" }, "BlockedWindows":{ "type":"list", @@ -390,6 +428,60 @@ "max":15, "min":0 }, + "CancelPracticeRunRequest":{ + "type":"structure", + "required":["zonalShiftId"], + "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a practice run zonal shift in Amazon Application Recovery Controller that you want to cancel.

    ", + "location":"uri", + "locationName":"zonalShiftId" + } + } + }, + "CancelPracticeRunResponse":{ + "type":"structure", + "required":[ + "zonalShiftId", + "resourceIdentifier", + "awayFrom", + "expiryTime", + "startTime", + "status", + "comment" + ], + "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of the practice run zonal shift in Amazon Application Recovery Controller that was canceled.

    " + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that you canceled a practice run zonal shift for. The identifier is the Amazon Resource Name (ARN) for the resource.

    " + }, + "awayFrom":{ + "shape":"AvailabilityZone", + "documentation":"

    The Availability Zone (for example, use1-az1) that traffic was moved away from for a resource that you specified for the practice run.

    " + }, + "expiryTime":{ + "shape":"ExpiryTime", + "documentation":"

    The expiry time (expiration time) for an on-demand practice run zonal shift is 30 minutes from the time when you start the practice run, unless you cancel it before that time. However, be aware that the expiryTime field for practice run zonal shifts always has a value of 1 minute.

    " + }, + "startTime":{ + "shape":"StartTime", + "documentation":"

    The time (UTC) when the zonal shift starts.

    " + }, + "status":{ + "shape":"ZonalShiftStatus", + "documentation":"

    A status for the practice run that you canceled (expected status is CANCELED).

    The Status for a practice run zonal shift can have one of the following values:

    " + }, + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    The initial comment that you entered about the practice run. Be aware that this comment can be overwritten by Amazon Web Services if the automatic check for balanced capacity fails. For more information, see Capacity checks for practice runs in the Amazon Application Recovery Controller Developer Guide.

    " + } + } + }, "CancelZonalShiftRequest":{ "type":"structure", "required":["zonalShiftId"], @@ -435,26 +527,30 @@ "PracticeConfigurationAlreadyExists", "AutoShiftEnabled", "PracticeConfigurationDoesNotExist", - "ZonalAutoshiftActive" + "ZonalAutoshiftActive", + "PracticeOutcomeAlarmsRed", + "PracticeBlockingAlarmsRed", + "PracticeInBlockedDates", + "PracticeInBlockedWindows" ] }, "ControlCondition":{ "type":"structure", "required":[ - "alarmIdentifier", - "type" + "type", + "alarmIdentifier" ], "members":{ - "alarmIdentifier":{ - "shape":"MetricIdentifier", - "documentation":"

    The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run.

    " - }, "type":{ "shape":"ControlConditionType", "documentation":"

    The type of alarm specified for a practice run. You can only specify Amazon CloudWatch alarms for practice runs, so the only valid value is CLOUDWATCH.

    " + }, + "alarmIdentifier":{ + "shape":"MetricIdentifier", + "documentation":"

    The Amazon Resource Name (ARN) for an Amazon CloudWatch alarm that you specify as a control condition for a practice run.

    " } }, - "documentation":"

    A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting or to interrupt a practice run in progress.

    Control condition alarms do not apply for autoshifts.

    For more information, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    A control condition is an alarm that you specify for a practice run. When you configure practice runs with zonal autoshift for a resource, you specify Amazon CloudWatch alarms, which you create in CloudWatch to use with the practice run. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from starting or to interrupt a practice run in progress.

    Control condition alarms do not apply for autoshifts.

    For more information, see Considerations when you configure zonal autoshift in the Amazon Application Recovery Controller Developer Guide.

    " }, "ControlConditionType":{ "type":"string", @@ -469,18 +565,22 @@ "CreatePracticeRunConfigurationRequest":{ "type":"structure", "required":[ - "outcomeAlarms", - "resourceIdentifier" + "resourceIdentifier", + "outcomeAlarms" ], "members":{ - "blockedDates":{ - "shape":"BlockedDates", - "documentation":"

    Optionally, you can block ARC from starting practice runs for a resource on specific calendar dates.

    The format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Separate multiple blocked dates with spaces.

    For example, if you have an application update scheduled to launch on May 1, 2024, and you don't want practice runs to shift traffic away at that time, you could set a blocked date for 2024-05-01.

    " + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier of the resource that Amazon Web Services shifts traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    " }, "blockedWindows":{ "shape":"BlockedWindows", "documentation":"

    Optionally, you can block ARC from starting practice runs for specific windows of days and times.

    The format for blocked windows is: DAY:HH:SS-DAY:HH:SS. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Also, be aware of potential time adjustments that might be required for daylight saving time differences. Separate multiple blocked windows with spaces.

    For example, say you run business report summaries three days a week. For this scenario, you might set the following recurring days and times as blocked windows, for example: MON-20:30-21:30 WED-20:30-21:30 FRI-20:30-21:30.

    " }, + "blockedDates":{ + "shape":"BlockedDates", + "documentation":"

    Optionally, you can block ARC from starting practice runs for a resource on specific calendar dates.

    The format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Separate multiple blocked dates with spaces.

    For example, if you have an application update scheduled to launch on May 1, 2024, and you don't want practice runs to shift traffic away at that time, you could set a blocked date for 2024-05-01.

    " + }, "blockingAlarms":{ "shape":"ControlConditions", "documentation":"

    An Amazon CloudWatch alarm that you can specify for zonal autoshift practice runs. This alarm blocks ARC from starting practice run zonal shifts, and ends a practice run that's in progress, when the alarm is in an ALARM state.

    " @@ -488,10 +588,6 @@ "outcomeAlarms":{ "shape":"ControlConditions", "documentation":"

    The outcome alarm for practice runs is a required Amazon CloudWatch alarm that you specify that ends a practice run when the alarm is in an ALARM state.

    Configure the alarm to monitor the health of your application when traffic is shifted away from an Availability Zone during each practice run. You should configure the alarm to go into an ALARM state if your application is impacted by the zonal shift, and you want to stop the zonal shift, to let traffic for the resource return to the Availability Zone.

    " - }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier of the resource that Amazon Web Services shifts traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " } } }, @@ -500,8 +596,8 @@ "required":[ "arn", "name", - "practiceRunConfiguration", - "zonalAutoshiftStatus" + "zonalAutoshiftStatus", + "practiceRunConfiguration" ], "members":{ "arn":{ @@ -512,13 +608,13 @@ "shape":"ResourceName", "documentation":"

    The name of the resource that you configured the practice run for.

    " }, - "practiceRunConfiguration":{ - "shape":"PracticeRunConfiguration", - "documentation":"

    A practice run configuration for a resource. Configurations include the outcome alarm that you specify for practice runs, and, optionally, a blocking alarm and blocking dates and windows.

    " - }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", "documentation":"

    The status for zonal autoshift for a resource. When you specify ENABLED for the autoshift status, Amazon Web Services shifts away application resource traffic from an Availability Zone, on your behalf, when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers.

    When you enable zonal autoshift, you must also configure practice runs for the resource.

    " + }, + "practiceRunConfiguration":{ + "shape":"PracticeRunConfiguration", + "documentation":"

    A practice run configuration for a resource. Configurations include the outcome alarm that you specify for practice runs, and, optionally, a blocking alarm and blocking dates and windows.

    " } } }, @@ -560,13 +656,12 @@ "type":"string", "max":5, "min":2, - "pattern":"^([1-9][0-9]*)(m|h)$" + "pattern":"([1-9][0-9]*)(m|h)" }, "ExpiryTime":{"type":"timestamp"}, "GetAutoshiftObserverNotificationStatusRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAutoshiftObserverNotificationStatusResponse":{ "type":"structure", @@ -584,7 +679,7 @@ "members":{ "resourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    ", + "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    ", "location":"uri", "locationName":"resourceIdentifier" } @@ -597,22 +692,26 @@ "zonalShifts" ], "members":{ + "arn":{ + "shape":"ResourceArn", + "documentation":"

    The Amazon Resource Name (ARN) for the resource.

    " + }, + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the resource.

    " + }, "appliedWeights":{ "shape":"AppliedWeights", "documentation":"

    A collection of key-value pairs that indicate whether resources are active in Availability Zones or not. The key name is the Availability Zone where the resource is deployed. The value is 1 or 0.

    " }, - "arn":{ - "shape":"ResourceArn", - "documentation":"

    The Amazon Resource Name (ARN) for the resource.

    " + "zonalShifts":{ + "shape":"ZonalShiftsInResource", + "documentation":"

    The zonal shifts that are currently active for a resource.

    " }, "autoshifts":{ "shape":"AutoshiftsInResource", "documentation":"

    An array of the autoshifts that are active for the resource.

    " }, - "name":{ - "shape":"ResourceName", - "documentation":"

    The name of the resource.

    " - }, "practiceRunConfiguration":{ "shape":"PracticeRunConfiguration", "documentation":"

    The practice run configuration for zonal autoshift that's associated with the resource.

    " @@ -620,10 +719,6 @@ "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", "documentation":"

    The status for zonal autoshift for a resource. When the autoshift status is ENABLED, Amazon Web Services shifts traffic for a resource away from an Availability Zone, on your behalf, when Amazon Web Services determines that there's an issue in the Availability Zone that could potentially affect customers.

    " - }, - "zonalShifts":{ - "shape":"ZonalShiftsInResource", - "documentation":"

    The zonal shifts that are currently active for a resource.

    " } } }, @@ -640,12 +735,6 @@ "ListAutoshiftsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

    The number of objects that you want to return with this call.

    ", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"String", "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

    ", @@ -657,6 +746,12 @@ "documentation":"

    The status of the autoshift.

    ", "location":"querystring", "locationName":"status" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of objects that you want to return with this call.

    ", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -676,17 +771,17 @@ "ListManagedResourcesRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

    The number of objects that you want to return with this call.

    ", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"String", "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

    ", "location":"querystring", "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of objects that you want to return with this call.

    ", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -707,29 +802,29 @@ "ListZonalShiftsRequest":{ "type":"structure", "members":{ - "maxResults":{ - "shape":"MaxResults", - "documentation":"

    The number of objects that you want to return with this call.

    ", - "location":"querystring", - "locationName":"maxResults" - }, "nextToken":{ "shape":"String", "documentation":"

    Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

    ", "location":"querystring", "locationName":"nextToken" }, + "status":{ + "shape":"ZonalShiftStatus", + "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and is active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    ", + "location":"querystring", + "locationName":"status" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The number of objects that you want to return with this call.

    ", + "location":"querystring", + "locationName":"maxResults" + }, "resourceIdentifier":{ "shape":"ResourceIdentifier", "documentation":"

    The identifier for the resource that you want to list zonal shifts for. The identifier is the Amazon Resource Name (ARN) for the resource.

    ", "location":"querystring", "locationName":"resourceIdentifier" - }, - "status":{ - "shape":"ZonalShiftStatus", - "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and is active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    ", - "location":"querystring", - "locationName":"status" } } }, @@ -754,40 +849,40 @@ "type":"structure", "required":["availabilityZones"], "members":{ - "appliedWeights":{ - "shape":"AppliedWeights", - "documentation":"

    A collection of key-value pairs that indicate whether resources are active in Availability Zones or not. The key name is the Availability Zone where the resource is deployed. The value is 1 or 0.

    " - }, "arn":{ "shape":"ResourceArn", "documentation":"

    The Amazon Resource Name (ARN) for the managed resource.

    " }, - "autoshifts":{ - "shape":"AutoshiftsInResource", - "documentation":"

    An array of the autoshifts that have been completed for a resource.

    " + "name":{ + "shape":"ResourceName", + "documentation":"

    The name of the managed resource.

    " }, "availabilityZones":{ "shape":"AvailabilityZones", "documentation":"

    The Availability Zones that a resource is deployed in.

    " }, - "name":{ - "shape":"ResourceName", - "documentation":"

    The name of the managed resource.

    " + "appliedWeights":{ + "shape":"AppliedWeights", + "documentation":"

    A collection of key-value pairs that indicate whether resources are active in Availability Zones or not. The key name is the Availability Zone where the resource is deployed. The value is 1 or 0.

    " }, - "practiceRunStatus":{ - "shape":"ZonalAutoshiftStatus", - "documentation":"

    This status tracks whether a practice run configuration exists for a resource. When you configure a practice run for a resource so that a practice run configuration exists, ARC sets this value to ENABLED. If you have not configured a practice run for the resource, or delete a practice run configuration, ARC sets the value to DISABLED.

    ARC updates this status; you can't set a practice run status to ENABLED or DISABLED.

    " + "zonalShifts":{ + "shape":"ZonalShiftsInResource", + "documentation":"

    An array of the zonal shifts for a resource.

    " + }, + "autoshifts":{ + "shape":"AutoshiftsInResource", + "documentation":"

    An array of the autoshifts that have been completed for a resource.

    " }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", "documentation":"

    The status of autoshift for a resource. When you configure zonal autoshift for a resource, you can set the value of the status to ENABLED or DISABLED.

    " }, - "zonalShifts":{ - "shape":"ZonalShiftsInResource", - "documentation":"

    An array of the zonal shifts for a resource.

    " + "practiceRunStatus":{ + "shape":"ZonalAutoshiftStatus", + "documentation":"

    This status tracks whether a practice run configuration exists for a resource. When you configure a practice run for a resource so that a practice run configuration exists, ARC sets this value to ENABLED. If you have not configured a practice run for the resource, or delete a practice run configuration, ARC sets the value to DISABLED.

    ARC updates this status; you can't set a practice run status to ENABLED or DISABLED.

    " } }, - "documentation":"

    A complex structure for a managed resource in an Amazon Web Services account with information about zonal shifts and autoshifts.

    A managed resource is a load balancer that has been registered with ARC by Elastic Load Balancing. You can start a zonal shift in ARC for a managed resource to temporarily move traffic for the resource away from an Availability Zone in an Amazon Web Services Region. You can also configure zonal autoshift for a managed resource.

    At this time, managed resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " + "documentation":"

    A complex structure for a managed resource in an Amazon Web Services account with information about zonal shifts and autoshifts.

    You can start a zonal shift in ARC for a managed resource to temporarily move traffic for the resource away from an Availability Zone in an Amazon Web Services Region. You can also configure zonal autoshift for a managed resource.

    At this time, managed resources are Amazon EC2 Auto Scaling groups, Amazon Elastic Kubernetes Service, Network Load Balancers, and Application Load Balancer.

    " }, "MaxResults":{ "type":"integer", @@ -799,20 +894,12 @@ "type":"string", "max":1024, "min":8, - "pattern":"^.*$" + "pattern":".*" }, "PracticeRunConfiguration":{ "type":"structure", "required":["outcomeAlarms"], "members":{ - "blockedDates":{ - "shape":"BlockedDates", - "documentation":"

    An array of one or more dates that you can specify when Amazon Web Services does not start practice runs for a resource.

    Specify blocked dates, in UTC, in the format YYYY-MM-DD, separated by spaces.

    " - }, - "blockedWindows":{ - "shape":"BlockedWindows", - "documentation":"

    An array of one or more windows of days and times that you can block ARC from starting practice runs for a resource.

    Specify the blocked windows in UTC, using the format DAY:HH:MM-DAY:HH:MM, separated by spaces. For example, MON:18:30-MON:19:30 TUE:18:30-TUE:19:30.

    " - }, "blockingAlarms":{ "shape":"ControlConditions", "documentation":"

    The blocking alarm for practice runs is an optional alarm that you can specify that blocks practice runs when the alarm is in an ALARM state.

    " @@ -820,6 +907,14 @@ "outcomeAlarms":{ "shape":"ControlConditions", "documentation":"

    The outcome alarm for practice runs is an alarm that you specify that ends a practice run when the alarm is in an ALARM state.

    " + }, + "blockedWindows":{ + "shape":"BlockedWindows", + "documentation":"

    An array of one or more windows of days and times that you can block ARC from starting practice runs for a resource.

    Specify the blocked windows in UTC, using the format DAY:HH:MM-DAY:HH:MM, separated by spaces. For example, MON:18:30-MON:19:30 TUE:18:30-TUE:19:30.

    " + }, + "blockedDates":{ + "shape":"BlockedDates", + "documentation":"

    An array of one or more dates that you can specify when Amazon Web Services does not start practice runs for a resource.

    Specify blocked dates, in UTC, in the format YYYY-MM-DD, separated by spaces.

    " } }, "documentation":"

    A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, ARC shifts traffic for the resource weekly for practice runs.

    Practice runs are required for zonal autoshift. The zonal shifts that ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.

    You can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.

    " @@ -830,14 +925,15 @@ "FAILED", "INTERRUPTED", "PENDING", - "SUCCEEDED" + "SUCCEEDED", + "CAPACITY_CHECK_FAILED" ] }, "ResourceArn":{ "type":"string", "max":1024, "min":8, - "pattern":"^arn:.*$" + "pattern":"arn:.*" }, "ResourceIdentifier":{ "type":"string", @@ -871,31 +967,95 @@ "ZONAL_AUTOSHIFT" ] }, + "StartPracticeRunRequest":{ + "type":"structure", + "required":[ + "resourceIdentifier", + "awayFrom", + "comment" + ], + "members":{ + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that you want to start a practice run zonal shift for. The identifier is the Amazon Resource Name (ARN) for the resource.

    " + }, + "awayFrom":{ + "shape":"AvailabilityZone", + "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is shifted away from for the resource that you specify for the practice run.

    " + }, + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    The initial comment that you enter about the practice run. Be aware that this comment can be overwritten by Amazon Web Services if the automatic check for balanced capacity fails. For more information, see Capacity checks for practice runs in the Amazon Application Recovery Controller Developer Guide.

    " + } + } + }, + "StartPracticeRunResponse":{ + "type":"structure", + "required":[ + "zonalShiftId", + "resourceIdentifier", + "awayFrom", + "expiryTime", + "startTime", + "status", + "comment" + ], + "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a practice run zonal shift.

    " + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that you want to shift traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    " + }, + "awayFrom":{ + "shape":"AvailabilityZone", + "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is shifted away from for the resource that you specify for the practice run.

    " + }, + "expiryTime":{ + "shape":"ExpiryTime", + "documentation":"

    The expiry time (expiration time) for an on-demand practice run zonal shift is 30 minutes from the time when you start the practice run, unless you cancel it before that time. However, be aware that the expiryTime field for practice run zonal shifts always has a value of 1 minute.

    " + }, + "startTime":{ + "shape":"StartTime", + "documentation":"

    The time (UTC) when the zonal shift starts.

    " + }, + "status":{ + "shape":"ZonalShiftStatus", + "documentation":"

    A status for the practice run (expected status is ACTIVE).

    " + }, + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    The initial comment that you enter about the practice run. Be aware that this comment can be overwritten by Amazon Web Services if the automatic check for balanced capacity fails. For more information, see Capacity checks for practice runs in the Amazon Application Recovery Controller Developer Guide.

    " + } + } + }, "StartTime":{"type":"timestamp"}, "StartZonalShiftRequest":{ "type":"structure", "required":[ + "resourceIdentifier", "awayFrom", - "comment", "expiresIn", - "resourceIdentifier" + "comment" ], "members":{ + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    " + }, "awayFrom":{ "shape":"AvailabilityZone", "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

    " }, - "comment":{ - "shape":"ZonalShiftComment", - "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string.

    " - }, "expiresIn":{ "shape":"ExpiresIn", "documentation":"

    The length of time that you want a zonal shift to be active, which ARC converts to an expiry time (expiration time). Zonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).

    If you want to still keep traffic away from an Availability Zone, you can update the zonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to restore traffic to the Availability Zone.

    To set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:

    • A lowercase letter m: To specify that the value is in minutes.

    • A lowercase letter h: To specify that the value is in hours.

    For example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).

    " }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string.

    " } } }, @@ -936,14 +1096,20 @@ "type":"structure", "required":["resourceIdentifier"], "members":{ - "blockedDates":{ - "shape":"BlockedDates", - "documentation":"

    Add, change, or remove blocked dates for a practice run in zonal autoshift.

    Optionally, you can block practice runs for specific calendar dates. The format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Separate multiple blocked dates with spaces.

    For example, if you have an application update scheduled to launch on May 1, 2024, and you don't want practice runs to shift traffic away at that time, you could set a blocked date for 2024-05-01.

    " + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that you want to update the practice run configuration for. The identifier is the Amazon Resource Name (ARN) for the resource.

    ", + "location":"uri", + "locationName":"resourceIdentifier" }, "blockedWindows":{ "shape":"BlockedWindows", "documentation":"

    Add, change, or remove windows of days and times for when you can, optionally, block ARC from starting a practice run for a resource.

    The format for blocked windows is: DAY:HH:MM-DAY:HH:MM. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Also, be aware of potential time adjustments that might be required for daylight saving time differences. Separate multiple blocked windows with spaces.

    For example, say you run business report summaries three days a week. For this scenario, you might set the following recurring days and times as blocked windows, for example: MON-20:30-21:30 WED-20:30-21:30 FRI-20:30-21:30.

    " }, + "blockedDates":{ + "shape":"BlockedDates", + "documentation":"

    Add, change, or remove blocked dates for a practice run in zonal autoshift.

    Optionally, you can block practice runs for specific calendar dates. The format for blocked dates is: YYYY-MM-DD. Keep in mind, when you specify dates, that dates and times for practice runs are in UTC. Separate multiple blocked dates with spaces.

    For example, if you have an application update scheduled to launch on May 1, 2024, and you don't want practice runs to shift traffic away at that time, you could set a blocked date for 2024-05-01.

    " + }, "blockingAlarms":{ "shape":"ControlConditions", "documentation":"

    Add, change, or remove the Amazon CloudWatch alarm that you optionally specify as the blocking alarm for practice runs.

    " @@ -951,12 +1117,6 @@ "outcomeAlarms":{ "shape":"ControlConditions", "documentation":"

    Specify a new Amazon CloudWatch alarm as the outcome alarm for practice runs.

    " - }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource that you want to update the practice run configuration for. The identifier is the Amazon Resource Name (ARN) for the resource.

    ", - "location":"uri", - "locationName":"resourceIdentifier" } } }, @@ -965,8 +1125,8 @@ "required":[ "arn", "name", - "practiceRunConfiguration", - "zonalAutoshiftStatus" + "zonalAutoshiftStatus", + "practiceRunConfiguration" ], "members":{ "arn":{ @@ -977,13 +1137,13 @@ "shape":"ResourceName", "documentation":"

    The name of the resource that you updated the practice run for.

    " }, - "practiceRunConfiguration":{ - "shape":"PracticeRunConfiguration", - "documentation":"

    The practice run configuration that was updated.

    " - }, "zonalAutoshiftStatus":{ "shape":"ZonalAutoshiftStatus", "documentation":"

    The zonal autoshift status for the resource that you updated the practice run for.

    " + }, + "practiceRunConfiguration":{ + "shape":"PracticeRunConfiguration", + "documentation":"

    The practice run configuration that was updated.

    " } } }, @@ -1027,6 +1187,12 @@ "type":"structure", "required":["zonalShiftId"], "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a zonal shift.

    ", + "location":"uri", + "locationName":"zonalShiftId" + }, "comment":{ "shape":"ZonalShiftComment", "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string.

    " @@ -1034,12 +1200,6 @@ "expiresIn":{ "shape":"ExpiresIn", "documentation":"

    The length of time that you want a zonal shift to be active, which ARC converts to an expiry time (expiration time). Zonal shifts are temporary. You can set a zonal shift to be active initially for up to three days (72 hours).

    If you want to still keep traffic away from an Availability Zone, you can update the zonal shift and set a new expiration. You can also cancel a zonal shift, before it expires, for example, if you're ready to restore traffic to the Availability Zone.

    To set a length of time for a zonal shift to be active, specify a whole number, and then one of the following, with no space:

    • A lowercase letter m: To specify that the value is in minutes.

    • A lowercase letter h: To specify that the value is in hours.

    For example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours).

    " - }, - "zonalShiftId":{ - "shape":"ZonalShiftId", - "documentation":"

    The identifier of a zonal shift.

    ", - "location":"uri", - "locationName":"zonalShiftId" } } }, @@ -1077,7 +1237,8 @@ "InvalidConditionType", "InvalidPracticeBlocker", "FISExperimentUpdateNotAllowed", - "AutoshiftUpdateNotAllowed" + "AutoshiftUpdateNotAllowed", + "UnsupportedPracticeCancelShiftType" ] }, "Weight":{ @@ -1096,42 +1257,42 @@ "ZonalShift":{ "type":"structure", "required":[ + "zonalShiftId", + "resourceIdentifier", "awayFrom", - "comment", "expiryTime", - "resourceIdentifier", "startTime", "status", - "zonalShiftId" + "comment" ], "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a zonal shift.

    " + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    " + }, "awayFrom":{ "shape":"AvailabilityZone", "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

    " }, - "comment":{ - "shape":"ZonalShiftComment", - "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string.

    " - }, "expiryTime":{ "shape":"ExpiryTime", "documentation":"

    The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

    When you start a zonal shift, you specify how long you want it to be active, which ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

    " }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource that Amazon Web Services shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " - }, "startTime":{ "shape":"StartTime", "documentation":"

    The time (UTC) when the zonal shift starts.

    " }, "status":{ "shape":"ZonalShiftStatus", - "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and is active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    " + "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and is active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    " }, - "zonalShiftId":{ - "shape":"ZonalShiftId", - "documentation":"

    The identifier of a zonal shift.

    " + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string.

    " } } }, @@ -1144,55 +1305,55 @@ "type":"string", "max":36, "min":6, - "pattern":"^[A-Za-z0-9-]+$" + "pattern":"[A-Za-z0-9-]+" }, "ZonalShiftInResource":{ "type":"structure", "required":[ "appliedStatus", + "zonalShiftId", + "resourceIdentifier", "awayFrom", - "comment", "expiryTime", - "resourceIdentifier", "startTime", - "zonalShiftId" + "comment" ], "members":{ "appliedStatus":{ "shape":"AppliedStatus", - "documentation":"

    The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

    The overall principle for precedence is that zonal shifts that you start as a customer take precedence over autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

    For more information, see How zonal autoshift and practice runs work in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    The appliedStatus field specifies which application traffic shift is in effect for a resource when there is more than one active traffic shift. There can be more than one application traffic shift in progress at the same time - that is, practice run zonal shifts, customer-initiated zonal shifts, or an autoshift. The appliedStatus field for a shift that is in progress for a resource can have one of two values: APPLIED or NOT_APPLIED. The zonal shift or autoshift that is currently in effect for the resource has an appliedStatus set to APPLIED.

    The overall principle for precedence is that zonal shifts that you start as a customer take precedence over autoshifts, which take precedence over practice runs. That is, customer-initiated zonal shifts > autoshifts > practice run zonal shifts.

    For more information, see How zonal autoshift and practice runs work in the Amazon Application Recovery Controller Developer Guide.

    " + }, + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a zonal shift.

    " + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource to include in a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    " }, "awayFrom":{ "shape":"AvailabilityZone", "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

    " }, - "comment":{ - "shape":"ZonalShiftComment", - "documentation":"

    A comment that you enter for a customer-initiated zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

    " - }, "expiryTime":{ "shape":"ExpiryTime", "documentation":"

    The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

    When you start a zonal shift, you specify how long you want it to be active, which ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

    " }, - "practiceRunOutcome":{ - "shape":"PracticeRunOutcome", - "documentation":"

    The outcome, or end state, returned for a practice run. The following values can be returned:

    • PENDING: Outcome value when a practice run is in progress.

    • SUCCEEDED: Outcome value when the outcome alarm specified for the practice run configuration does not go into an ALARM state during the practice run, and the practice run was not interrupted before it completed the expected 30 minute zonal shift.

    • INTERRUPTED: Outcome value when the practice run was stopped before the expected 30 minute zonal shift duration, or there was another problem with the practice run that created an inconclusive outcome.

    • FAILED: Outcome value when the outcome alarm specified for the practice run configuration goes into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    For more information about practice run outcomes, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "startTime":{ + "shape":"StartTime", + "documentation":"

    The time (UTC) when the zonal shift starts.

    " }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource to include in a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    A comment that you enter for a customer-initiated zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

    " }, "shiftType":{ "shape":"ShiftType", "documentation":"

    Defines the zonal shift type.

    " }, - "startTime":{ - "shape":"StartTime", - "documentation":"

    The time (UTC) when the zonal shift starts.

    " - }, - "zonalShiftId":{ - "shape":"ZonalShiftId", - "documentation":"

    The identifier of a zonal shift.

    " + "practiceRunOutcome":{ + "shape":"PracticeRunOutcome", + "documentation":"

    The outcome, or end state, returned for a practice run. The following values can be returned:

    • PENDING: Outcome value when a practice run is in progress.

    • SUCCEEDED: Outcome value when the outcome alarm specified for the practice run configuration does not go into an ALARM state during the practice run, and the practice run was not interrupted before it completed the expected 30 minute zonal shift.

    • INTERRUPTED: Outcome value when the practice run was stopped before the expected 30 minute zonal shift duration, or there was another problem with the practice run that created an inconclusive outcome.

    • FAILED: Outcome value when the outcome alarm specified for the practice run configuration goes into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    • CAPACITY_CHECK_FAILED: The check for balanced capacity across Availability Zones for your load balancing and Auto Scaling group resources failed.

    For more information about practice run outcomes, see Considerations when you configure zonal autoshift in the Amazon Application Recovery Controller Developer Guide.

    " } }, "documentation":"

    A complex structure that lists the zonal shifts for a managed resource and their statuses for the resource.

    " @@ -1212,58 +1373,58 @@ "ZonalShiftSummary":{ "type":"structure", "required":[ + "zonalShiftId", + "resourceIdentifier", "awayFrom", - "comment", "expiryTime", - "resourceIdentifier", "startTime", "status", - "zonalShiftId" + "comment" ], "members":{ + "zonalShiftId":{ + "shape":"ZonalShiftId", + "documentation":"

    The identifier of a zonal shift.

    " + }, + "resourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

    The identifier for the resource to include in a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    Amazon Application Recovery Controller currently supports enabling the following resources for zonal shift and zonal autoshift:

    " + }, "awayFrom":{ "shape":"AvailabilityZone", "documentation":"

    The Availability Zone (for example, use1-az1) that traffic is moved away from for a resource when you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the resource is instead moved to other Availability Zones in the Amazon Web Services Region.

    " }, - "comment":{ - "shape":"ZonalShiftComment", - "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

    " - }, "expiryTime":{ "shape":"ExpiryTime", "documentation":"

    The expiry time (expiration time) for a customer-initiated zonal shift. A zonal shift is temporary and must be set to expire when you start the zonal shift. You can initially set a zonal shift to expire in a maximum of three days (72 hours). However, you can update a zonal shift to set a new expiration at any time.

    When you start a zonal shift, you specify how long you want it to be active, which ARC converts to an expiry time (expiration time). You can cancel a zonal shift when you're ready to restore traffic to the Availability Zone, or just wait for it to expire. Or you can update the zonal shift to specify another length of time to expire in.

    " }, - "practiceRunOutcome":{ - "shape":"PracticeRunOutcome", - "documentation":"

    The outcome, or end state, of a practice run. The following values can be returned:

    • PENDING: Outcome value when the practice run is in progress.

    • SUCCEEDED: Outcome value when the outcome alarm specified for the practice run configuration does not go into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    • INTERRUPTED: Outcome value when the practice run did not run for the expected 30 minutes or there was another problem with the practice run that created an inconclusive outcome.

    • FAILED: Outcome value when the outcome alarm specified for the practice run configuration goes into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    For more information about practice run outcomes, see Considerations when you configure zonal autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide.

    " - }, - "resourceIdentifier":{ - "shape":"ResourceIdentifier", - "documentation":"

    The identifier for the resource to include in a zonal shift. The identifier is the Amazon Resource Name (ARN) for the resource.

    At this time, you can only start a zonal shift for Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.

    " - }, - "shiftType":{ - "shape":"ShiftType", - "documentation":"

    Defines the zonal shift type.

    " - }, "startTime":{ "shape":"StartTime", "documentation":"

    The time (UTC) when the zonal shift starts.

    " }, "status":{ "shape":"ZonalShiftStatus", - "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    " + "documentation":"

    A status for a zonal shift.

    The Status for a zonal shift can have one of the following values:

    • ACTIVE: The zonal shift has been started and is active.

    • EXPIRED: The zonal shift has expired (the expiry time was exceeded).

    • CANCELED: The zonal shift was canceled.

    " }, - "zonalShiftId":{ - "shape":"ZonalShiftId", - "documentation":"

    The identifier of a zonal shift.

    " + "comment":{ + "shape":"ZonalShiftComment", + "documentation":"

    A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. That is, a new comment overwrites any existing comment string.

    " + }, + "shiftType":{ + "shape":"ShiftType", + "documentation":"

    Defines the zonal shift type.

    " + }, + "practiceRunOutcome":{ + "shape":"PracticeRunOutcome", + "documentation":"

    The outcome, or end state, of a practice run. The following values can be returned:

    • PENDING: Outcome value when the practice run is in progress.

    • SUCCEEDED: Outcome value when the outcome alarm specified for the practice run configuration does not go into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    • INTERRUPTED: Outcome value when the practice run did not run for the expected 30 minutes or there was another problem with the practice run that created an inconclusive outcome.

    • FAILED: Outcome value when the outcome alarm specified for the practice run configuration goes into an ALARM state during the practice run, and the practice run was not interrupted before it completed.

    • CAPACITY_CHECK_FAILED: The check for balanced capacity across Availability Zones for your load balancing and Auto Scaling group resources failed.

    For more information about practice run outcomes, see Considerations when you configure zonal autoshift in the Amazon Application Recovery Controller Developer Guide.

    " } }, - "documentation":"

    Lists information about zonal shifts in Amazon Route 53 Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that ARC starts on your behalf for practice runs with zonal autoshift.

    Zonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A practice run zonal shift has a 30 minute duration.

    " + "documentation":"

    Lists information about zonal shifts in Amazon Application Recovery Controller, including zonal shifts that you start yourself and zonal shifts that ARC starts on your behalf for practice runs with zonal autoshift.

    Zonal shifts are temporary, including customer-initiated zonal shifts and the zonal autoshift practice run zonal shifts that ARC starts weekly, on your behalf. A zonal shift that a customer starts can be active for up to three days (72 hours). A practice run zonal shift has a 30 minute duration.

    " }, "ZonalShiftsInResource":{ "type":"list", "member":{"shape":"ZonalShiftInResource"} } }, - "documentation":"

    Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (ARC).

    You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone.

    You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift is a capability in ARC where you authorize Amazon Web Services to shift away application resource traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers.

    To help make sure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. Practice runs help you to make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone.

    Before you configure practice runs or enable zonal autoshift, we strongly recommend that you prescale your application resource capacity in all Availability Zones in the Region where your application resources are deployed. You should not rely on scaling on demand when an autoshift or practice run starts. Zonal autoshift, including practice runs, works independently, and does not wait for auto scaling actions to complete. Relying on auto scaling, instead of pre-scaling, can result in loss of availability.

    If you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure the minimum capacity of your auto scaling to continue operating normally with the loss of an Availability Zone.

    Be aware that ARC does not inspect the health of individual resources. Amazon Web Services only starts an autoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could potentially impact customers. In some cases, resources might be shifted away that are not experiencing impact.

    For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide.

    " + "documentation":"

    Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Application Recovery Controller (ARC).

    You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone.

    You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift is a capability in ARC where you authorize Amazon Web Services to shift away application resource traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers.

    For more information about using zonal shift and zonal autoshift, see the Amazon Application Recovery Controller Developer Guide.

    " } diff --git a/services/arczonalshift/src/main/resources/codegen-resources/waiters-2.json b/services/arczonalshift/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/arczonalshift/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/artifact/pom.xml b/services/artifact/pom.xml index 03749023fe67..05fe60a7680b 100644 --- a/services/artifact/pom.xml +++ b/services/artifact/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT artifact AWS Java SDK :: Services :: Artifact diff --git a/services/artifact/src/main/resources/codegen-resources/customization.config b/services/artifact/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/artifact/src/main/resources/codegen-resources/customization.config +++ b/services/artifact/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/athena/pom.xml b/services/athena/pom.xml index 3acacd02b032..9d53f198a351 100644 --- a/services/athena/pom.xml +++ b/services/athena/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT athena AWS Java SDK :: Services :: Amazon Athena diff --git a/services/athena/src/main/resources/codegen-resources/customization.config b/services/athena/src/main/resources/codegen-resources/customization.config index 8a963ee2cf9f..a9daa25042de 100644 --- a/services/athena/src/main/resources/codegen-resources/customization.config +++ b/services/athena/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "listNamedQueries", "listQueryExecutions" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": 
true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/athena/src/main/resources/codegen-resources/service-2.json b/services/athena/src/main/resources/codegen-resources/service-2.json index f0391c9e048f..5de96ff11c9e 100644 --- a/services/athena/src/main/resources/codegen-resources/service-2.json +++ b/services/athena/src/main/resources/codegen-resources/service-2.json @@ -98,7 +98,7 @@ {"shape":"InternalServerException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same Amazon Web Services account.

    This API operation creates the following resources.

    • CFN Stack Name with a maximum length of 128 characters and prefix athenafederatedcatalog-CATALOG_NAME_SANITIZED with length 23 characters.

    • Lambda Function Name with a maximum length of 64 characters and prefix athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters.

    • Glue Connection Name with a maximum length of 255 characters and a prefix athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters.

    " + "documentation":"

    Creates (registers) a data catalog with the specified name and properties. Catalogs created are visible to all users of the same Amazon Web Services account.

    For a FEDERATED catalog, this API operation creates the following resources.

    • CFN Stack Name with a maximum length of 128 characters and prefix athenafederatedcatalog-CATALOG_NAME_SANITIZED with length 23 characters.

    • Lambda Function Name with a maximum length of 64 characters and prefix athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters.

    • Glue Connection Name with a maximum length of 255 characters and a prefix athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters.

    " }, "CreateNamedQuery":{ "name":"CreateNamedQuery", @@ -1307,8 +1307,7 @@ }, "CancelCapacityReservationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CapacityAllocation":{ "type":"structure", @@ -1592,8 +1591,7 @@ }, "CreateCapacityReservationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateDataCatalogInput":{ "type":"structure", @@ -1608,7 +1606,7 @@ }, "Type":{ "shape":"DataCatalogType", - "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.

    " + "documentation":"

    The type of data catalog to create: LAMBDA for a federated catalog, GLUE for a Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass.

    For FEDERATED type, we do not support IAM identity center.

    " }, "Description":{ "shape":"DescriptionString", @@ -1732,8 +1730,7 @@ }, "CreatePreparedStatementOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CreatePresignedNotebookUrlRequest":{ "type":"structure", @@ -1791,8 +1788,7 @@ }, "CreateWorkGroupOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CustomerContentEncryptionConfiguration":{ "type":"structure", @@ -1953,8 +1949,7 @@ }, "DeleteCapacityReservationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataCatalogInput":{ "type":"structure", @@ -1989,8 +1984,7 @@ }, "DeleteNamedQueryOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteNotebookInput":{ "type":"structure", @@ -2004,8 +1998,7 @@ }, "DeleteNotebookOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePreparedStatementInput":{ "type":"structure", @@ -2026,8 +2019,7 @@ }, "DeletePreparedStatementOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWorkGroupInput":{ "type":"structure", @@ -2045,8 +2037,7 @@ }, "DeleteWorkGroupOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescriptionString":{ "type":"string", @@ -2525,6 +2516,10 @@ "MaxResults":{ "shape":"MaxQueryResults", "documentation":"

    The maximum number of results (rows) to return in this request.

    " + }, + "QueryResultType":{ + "shape":"QueryResultType", + "documentation":"

    When you set this to DATA_ROWS or empty, GetQueryResults returns the query results in rows. If set to DATA_MANIFEST, it returns the manifest file in rows. Only the query types CREATE TABLE AS SELECT, UNLOAD, and INSERT can generate a manifest file. If you use DATA_MANIFEST for other query types, the query will fail.

    " } } }, @@ -3313,6 +3308,50 @@ } }, "Long":{"type":"long"}, + "ManagedQueryResultsConfiguration":{ + "type":"structure", + "required":["Enabled"], + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

    If set to true, allows you to store query results in Athena owned storage. If set to false, the workgroup member stores query results in the location specified under ResultConfiguration$OutputLocation. The default is false. A workgroup cannot have the ResultConfiguration$OutputLocation parameter when you set this field to true.

    " + }, + "EncryptionConfiguration":{ + "shape":"ManagedQueryResultsEncryptionConfiguration", + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + } + }, + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled; whether encryption configuration, if any, is used for encrypting query results.

    " + }, + "ManagedQueryResultsConfigurationUpdates":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"BoxedBoolean", + "documentation":"

    If set to true, specifies that Athena manages query results in Athena owned storage.

    " + }, + "EncryptionConfiguration":{ + "shape":"ManagedQueryResultsEncryptionConfiguration", + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + }, + "RemoveEncryptionConfiguration":{ + "shape":"BoxedBoolean", + "documentation":"

    If set to true, it removes the workgroup from Athena owned storage. The existing query results are cleaned up after 24 hours. You must provide query results in the location specified under ResultConfiguration$OutputLocation.

    " + } + }, + "documentation":"

    Updates the configuration for managed query results.

    " + }, + "ManagedQueryResultsEncryptionConfiguration":{ + "type":"structure", + "required":["KmsKey"], + "members":{ + "KmsKey":{ + "shape":"KmsKey", + "documentation":"

    The ARN of a KMS key for encrypting managed query results.

    " + } + }, + "documentation":"

    If you encrypt query and calculation results in Athena owned storage, this field indicates the encryption option (for example, SSE_KMS or CSE_KMS) and key information.

    " + }, "MaxApplicationDPUSizesCount":{ "type":"integer", "max":100, @@ -3639,8 +3678,7 @@ }, "PutCapacityAssignmentConfigurationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "QueryExecution":{ "type":"structure", @@ -3657,6 +3695,10 @@ "shape":"StatementType", "documentation":"

    The type of query statement that was run. DDL indicates DDL query statements. DML indicates DML (Data Manipulation Language) query statements, such as CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL and DML, such as SHOW CREATE TABLE, or DESCRIBE TABLE.

    " }, + "ManagedQueryResultsConfiguration":{ + "shape":"ManagedQueryResultsConfiguration", + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled; whether encryption configuration, if any, is used for encrypting query results.

    " + }, "ResultConfiguration":{ "shape":"ResultConfiguration", "documentation":"

    The location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query results. These are known as \"client-side settings\". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.

    " @@ -3808,6 +3850,13 @@ }, "documentation":"

    The completion date, current state, submission time, and state change reason (if applicable) for the query execution.

    " }, + "QueryResultType":{ + "type":"string", + "enum":[ + "DATA_MANIFEST", + "DATA_ROWS" + ] + }, "QueryResultsS3AccessGrantsConfiguration":{ "type":"structure", "required":[ @@ -4276,7 +4325,7 @@ "shape":"CalculationConfiguration", "documentation":"

    Contains configuration information for the calculation.

    ", "deprecated":true, - "deprecatedMessage":"Kepler Post GA Tasks : https://sim.amazon.com/issues/ATHENA-39828" + "deprecatedMessage":"Structure is deprecated." }, "CodeBlock":{ "shape":"CodeBlock", @@ -4437,8 +4486,7 @@ }, "StopQueryExecutionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "String":{"type":"string"}, "StringList":{ @@ -4538,8 +4586,7 @@ }, "TagResourceOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4681,8 +4728,7 @@ }, "UntagResourceOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateCapacityReservationInput":{ "type":"structure", @@ -4703,8 +4749,7 @@ }, "UpdateCapacityReservationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDataCatalogInput":{ "type":"structure", @@ -4733,8 +4778,7 @@ }, "UpdateDataCatalogOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateNamedQueryInput":{ "type":"structure", @@ -4764,8 +4808,7 @@ }, "UpdateNamedQueryOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateNotebookInput":{ "type":"structure", @@ -4820,13 +4863,11 @@ }, "UpdateNotebookMetadataOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateNotebookOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdatePreparedStatementInput":{ "type":"structure", @@ -4856,8 +4897,7 @@ }, "UpdatePreparedStatementOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateWorkGroupInput":{ "type":"structure", @@ -4883,8 +4923,7 @@ }, "UpdateWorkGroupOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "WorkGroup":{ "type":"structure", @@ -4924,6 +4963,10 @@ "shape":"ResultConfiguration", "documentation":"

    The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided.

    " }, + "ManagedQueryResultsConfiguration":{ + "shape":"ManagedQueryResultsConfiguration", + "documentation":"

    The configuration for storing results in Athena owned storage, which includes whether this feature is enabled; whether encryption configuration, if any, is used for encrypting query results.

    " + }, "EnforceWorkGroupConfiguration":{ "shape":"BoxedBoolean", "documentation":"

    If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.

    " @@ -4982,6 +5025,10 @@ "shape":"ResultConfigurationUpdates", "documentation":"

    The result configuration information about the queries in this workgroup that will be updated. Includes the updated results location and an updated option for encrypting query results.

    " }, + "ManagedQueryResultsConfigurationUpdates":{ + "shape":"ManagedQueryResultsConfigurationUpdates", + "documentation":"

    Updates configuration information for managed query results in the workgroup.

    " + }, "PublishCloudWatchMetricsEnabled":{ "shape":"BoxedBoolean", "documentation":"

    Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch.

    " diff --git a/services/auditmanager/pom.xml b/services/auditmanager/pom.xml index 8906831de073..e8af51457ee7 100644 --- a/services/auditmanager/pom.xml +++ b/services/auditmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT auditmanager AWS Java SDK :: Services :: Audit Manager diff --git a/services/auditmanager/src/main/resources/codegen-resources/customization.config b/services/auditmanager/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/customization.config +++ b/services/auditmanager/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/auditmanager/src/main/resources/codegen-resources/service-2.json b/services/auditmanager/src/main/resources/codegen-resources/service-2.json index 4ad7117fab91..cebd3977d369 100644 --- a/services/auditmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/auditmanager/src/main/resources/codegen-resources/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"auditmanager", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Audit Manager", "serviceId":"AuditManager", "signatureVersion":"v4", "signingName":"auditmanager", - "uid":"auditmanager-2017-07-25" + "uid":"auditmanager-2017-07-25", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAssessmentReportEvidenceFolder":{ @@ -568,7 +570,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Gets a list of the Amazon Web Services from which Audit Manager can collect evidence.

    Audit Manager defines which Amazon Web Services are in scope for an assessment. Audit Manager infers this scope by examining the assessment’s controls and their data sources, and then mapping this information to one or more of the corresponding Amazon Web Services that are in this list.

    For information about why it's no longer possible to specify services in scope manually, see I can't edit the services in scope for my assessment in the Troubleshooting section of the Audit Manager user guide.

    " + "documentation":"

    Gets a list of the Amazon Web Services services from which Audit Manager can collect evidence.

    Audit Manager defines which Amazon Web Services services are in scope for an assessment. Audit Manager infers this scope by examining the assessment’s controls and their data sources, and then mapping this information to one or more of the corresponding Amazon Web Services services that are in this list.

    For information about why it's no longer possible to specify services in scope manually, see I can't edit the services in scope for my assessment in the Troubleshooting section of the Audit Manager user guide.

    " }, "GetSettings":{ "name":"GetSettings", @@ -860,7 +862,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Edits an Audit Manager assessment.

    " }, @@ -1025,10 +1028,10 @@ "members":{ "serviceName":{ "shape":"AWSServiceName", - "documentation":"

    The name of the Amazon Web Service.

    " + "documentation":"

    The name of the Amazon Web Services service.

    " } }, - "documentation":"

    An Amazon Web Service such as Amazon S3 or CloudTrail.

    For an example of how to find an Amazon Web Service name and how to define it in your assessment scope, see the following:

    " + "documentation":"

    An Amazon Web Services service such as Amazon S3 or CloudTrail.

    For an example of how to find an Amazon Web Services service name and how to define it in your assessment scope, see the following:

    " }, "AWSServiceName":{ "type":"string", @@ -1134,7 +1137,10 @@ }, "description":{ "shape":"ControlDescription", - "documentation":"

    The description of the control.

    " + "documentation":"

    The description of the control.

    ", + "deprecated":true, + "deprecatedMessage":"This data type will be deprecated on May 19, 2025. To view the assessment control description, use GetControl.", + "deprecatedSince":"2025-05-19" }, "status":{ "shape":"ControlStatus", @@ -1244,7 +1250,7 @@ }, "dataSource":{ "shape":"String", - "documentation":"

    The Amazon Web Service that the evidence was collected from.

    " + "documentation":"

    The Amazon Web Services service that the evidence was collected from.

    " }, "author":{ "shape":"String", @@ -1268,7 +1274,7 @@ }, "evidenceByTypeConfigurationDataCount":{ "shape":"Integer", - "documentation":"

    The number of evidence that falls under the configuration data category. This evidence is collected from configuration snapshots of other Amazon Web Services such as Amazon EC2, Amazon S3, or IAM.

    " + "documentation":"

    The number of evidence that falls under the configuration data category. This evidence is collected from configuration snapshots of other Amazon Web Services services such as Amazon EC2, Amazon S3, or IAM.

    " }, "evidenceByTypeManualCount":{ "shape":"Integer", @@ -1717,8 +1723,7 @@ }, "AssociateAssessmentReportEvidenceFolderResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AuditManagerArn":{ "type":"string", @@ -2810,8 +2815,7 @@ }, "DeleteAssessmentFrameworkResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAssessmentFrameworkShareRequest":{ "type":"structure", @@ -2836,8 +2840,7 @@ }, "DeleteAssessmentFrameworkShareResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAssessmentReportRequest":{ "type":"structure", @@ -2862,8 +2865,7 @@ }, "DeleteAssessmentReportResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAssessmentRequest":{ "type":"structure", @@ -2879,8 +2881,7 @@ }, "DeleteAssessmentResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteControlRequest":{ "type":"structure", @@ -2896,8 +2897,7 @@ }, "DeleteControlResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResources":{ "type":"string", @@ -2908,8 +2908,7 @@ }, "DeregisterAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterAccountResponse":{ "type":"structure", @@ -2931,8 +2930,7 @@ }, "DeregisterOrganizationAdminAccountResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregistrationPolicy":{ "type":"structure", @@ -2965,8 +2963,7 @@ }, "DisassociateAssessmentReportEvidenceFolderResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "EmailAddress":{ "type":"string", @@ -3008,7 +3005,7 @@ }, "eventSource":{ "shape":"AWSServiceName", - "documentation":"

    The Amazon Web Service that the evidence is collected from.

    " + "documentation":"

    The Amazon Web Services service that the evidence is collected from.

    " }, "eventName":{ "shape":"EventName", @@ -3268,8 +3265,7 @@ }, "GetAccountStatusRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAccountStatusResponse":{ "type":"structure", @@ -3720,8 +3716,7 @@ }, "GetInsightsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetInsightsResponse":{ "type":"structure", @@ -3734,8 +3729,7 @@ }, "GetOrganizationAdminAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetOrganizationAdminAccountResponse":{ "type":"structure", @@ -3752,15 +3746,14 @@ }, "GetServicesInScopeRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetServicesInScopeResponse":{ "type":"structure", "members":{ "serviceMetadata":{ "shape":"ServiceMetadataList", - "documentation":"

    The metadata that's associated with the Amazon Web Service.

    " + "documentation":"

    The metadata that's associated with the Amazon Web Services service.

    " } } }, @@ -4583,12 +4576,12 @@ }, "awsServices":{ "shape":"AWSServices", - "documentation":"

    The Amazon Web Services services that are included in the scope of the assessment.

    This API parameter is no longer supported. If you use this parameter to specify one or more Amazon Web Services, Audit Manager ignores this input. Instead, the value for awsServices will show as empty.

    ", + "documentation":"

    The Amazon Web Services services that are included in the scope of the assessment.

    This API parameter is no longer supported. If you use this parameter to specify one or more Amazon Web Services services, Audit Manager ignores this input. Instead, the value for awsServices will show as empty.

    ", "deprecated":true, "deprecatedMessage":"You can't specify services in scope when creating/updating an assessment. If you use the parameter to specify one or more AWS services, Audit Manager ignores the input. Instead the value of the parameter will show as empty indicating that the services are defined and managed by Audit Manager." } }, - "documentation":"

    The wrapper that contains the Amazon Web Services accounts that are in scope for the assessment.

    You no longer need to specify which Amazon Web Services are in scope when you create or update an assessment. Audit Manager infers the services in scope by examining your assessment controls and their data sources, and then mapping this information to the relevant Amazon Web Services.

    If an underlying data source changes for your assessment, we automatically update the services scope as needed to reflect the correct Amazon Web Services. This ensures that your assessment collects accurate and comprehensive evidence about all of the relevant services in your AWS environment.

    ", + "documentation":"

    The wrapper that contains the Amazon Web Services accounts that are in scope for the assessment.

    You no longer need to specify which Amazon Web Services services are in scope when you create or update an assessment. Audit Manager infers the services in scope by examining your assessment controls and their data sources, and then mapping this information to the relevant Amazon Web Services services.

    If an underlying data source changes for your assessment, we automatically update the services scope as needed to reflect the correct Amazon Web Services services. This ensures that your assessment collects accurate and comprehensive evidence about all of the relevant services in your AWS environment.

    ", "sensitive":true }, "ServiceMetadata":{ @@ -4596,22 +4589,22 @@ "members":{ "name":{ "shape":"AWSServiceName", - "documentation":"

    The name of the Amazon Web Service.

    " + "documentation":"

    The name of the Amazon Web Services service.

    " }, "displayName":{ "shape":"NonEmptyString", - "documentation":"

    The display name of the Amazon Web Service.

    " + "documentation":"

    The display name of the Amazon Web Services service.

    " }, "description":{ "shape":"NonEmptyString", - "documentation":"

    The description of the Amazon Web Service.

    " + "documentation":"

    The description of the Amazon Web Services service.

    " }, "category":{ "shape":"NonEmptyString", - "documentation":"

    The category that the Amazon Web Service belongs to, such as compute, storage, or database.

    " + "documentation":"

    The category that the Amazon Web Services service belongs to, such as compute, storage, or database.

    " } }, - "documentation":"

    The metadata that's associated with the Amazon Web Service.

    " + "documentation":"

    The metadata that's associated with the Amazon Web Services service.

    " }, "ServiceMetadataList":{ "type":"list", @@ -4739,7 +4732,7 @@ }, "keywordValue":{ "shape":"KeywordValue", - "documentation":"

    The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

    If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

    • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules. For some rules, the rule identifier is different from the rule name. For example, the rule name restricted-ssh has the following rule identifier: INCOMING_SSH_DISABLED. Make sure to use the rule identifier, not the rule name.

      Keyword example for managed rules:

    • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the custom rule from a managed rule.

      Keyword example for custom rules:

      • Custom rule name: my-custom-config-rule

        keywordValue: Custom_my-custom-config-rule

    • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

      Keyword examples for service-linked rules:

      • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

        keywordValue: Custom_CustomRuleForAccount-conformance-pack

      • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

        keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

    The keywordValue is case sensitive. If you enter a value incorrectly, Audit Manager might not recognize the data source mapping. As a result, you might not successfully collect evidence from that data source as intended.

    Keep in mind the following requirements, depending on the data source type that you're using.

    1. For Config:

      • For managed rules, make sure that the keywordValue is the rule identifier in ALL_CAPS_WITH_UNDERSCORES. For example, CLOUDWATCH_LOG_GROUP_ENCRYPTED. For accuracy, we recommend that you reference the list of supported Config managed rules.

      • For custom rules, make sure that the keywordValue has the Custom_ prefix followed by the custom rule name. The format of the custom rule name itself may vary. For accuracy, we recommend that you visit the Config console to verify your custom rule name.

    2. For Security Hub: The format varies for Security Hub control names. For accuracy, we recommend that you reference the list of supported Security Hub controls.

    3. For Amazon Web Services API calls: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, iam_ListGroups. For accuracy, we recommend that you reference the list of supported API calls.

    4. For CloudTrail: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, cloudtrail_StartLogging. For accuracy, we recommend that you review the Amazon Web Service prefix and action names in the Service Authorization Reference.

    " + "documentation":"

    The value of the keyword that's used when mapping a control data source. For example, this can be a CloudTrail event name, a rule name for Config, a Security Hub control, or the name of an Amazon Web Services API call.

    If you’re mapping a data source to a rule in Config, the keywordValue that you specify depends on the type of rule:

    • For managed rules, you can use the rule identifier as the keywordValue. You can find the rule identifier from the list of Config managed rules. For some rules, the rule identifier is different from the rule name. For example, the rule name restricted-ssh has the following rule identifier: INCOMING_SSH_DISABLED. Make sure to use the rule identifier, not the rule name.

      Keyword example for managed rules:

    • For custom rules, you form the keywordValue by adding the Custom_ prefix to the rule name. This prefix distinguishes the custom rule from a managed rule.

      Keyword example for custom rules:

      • Custom rule name: my-custom-config-rule

        keywordValue: Custom_my-custom-config-rule

    • For service-linked rules, you form the keywordValue by adding the Custom_ prefix to the rule name. In addition, you remove the suffix ID that appears at the end of the rule name.

      Keyword examples for service-linked rules:

      • Service-linked rule name: CustomRuleForAccount-conformance-pack-szsm1uv0w

        keywordValue: Custom_CustomRuleForAccount-conformance-pack

      • Service-linked rule name: OrgConfigRule-s3-bucket-versioning-enabled-dbgzf8ba

        keywordValue: Custom_OrgConfigRule-s3-bucket-versioning-enabled

    The keywordValue is case sensitive. If you enter a value incorrectly, Audit Manager might not recognize the data source mapping. As a result, you might not successfully collect evidence from that data source as intended.

    Keep in mind the following requirements, depending on the data source type that you're using.

    1. For Config:

      • For managed rules, make sure that the keywordValue is the rule identifier in ALL_CAPS_WITH_UNDERSCORES. For example, CLOUDWATCH_LOG_GROUP_ENCRYPTED. For accuracy, we recommend that you reference the list of supported Config managed rules.

      • For custom rules, make sure that the keywordValue has the Custom_ prefix followed by the custom rule name. The format of the custom rule name itself may vary. For accuracy, we recommend that you visit the Config console to verify your custom rule name.

    2. For Security Hub: The format varies for Security Hub control names. For accuracy, we recommend that you reference the list of supported Security Hub controls.

    3. For Amazon Web Services API calls: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, iam_ListGroups. For accuracy, we recommend that you reference the list of supported API calls.

    4. For CloudTrail: Make sure that the keywordValue is written as serviceprefix_ActionName. For example, cloudtrail_StartLogging. For accuracy, we recommend that you review the Amazon Web Services service prefix and action names in the Service Authorization Reference.

    " } }, "documentation":"

    A keyword that relates to the control data source.

    For manual evidence, this keyword indicates if the manual evidence is a file or text.

    For automated evidence, this keyword identifies a specific CloudTrail event, Config rule, Security Hub control, or Amazon Web Services API name.

    To learn more about the supported keywords that you can use when mapping a control data source, see the following pages in the Audit Manager User Guide:

    " @@ -4851,8 +4844,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4938,8 +4930,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAssessmentControlRequest":{ "type":"structure", diff --git a/services/autoscaling/pom.xml b/services/autoscaling/pom.xml index 0865bf1ea826..61591d2752e0 100644 --- a/services/autoscaling/pom.xml +++ b/services/autoscaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT autoscaling AWS Java SDK :: Services :: Auto Scaling diff --git a/services/autoscaling/src/main/resources/codegen-resources/service-2.json b/services/autoscaling/src/main/resources/codegen-resources/service-2.json index 6929ddade54a..8820a1c6af40 100644 --- a/services/autoscaling/src/main/resources/codegen-resources/service-2.json +++ b/services/autoscaling/src/main/resources/codegen-resources/service-2.json @@ -1304,8 +1304,7 @@ }, "AttachLoadBalancerTargetGroupsResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachLoadBalancerTargetGroupsType":{ "type":"structure", @@ -1326,8 +1325,7 @@ }, "AttachLoadBalancersResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachLoadBalancersType":{ "type":"structure", @@ -1348,8 +1346,7 @@ }, "AttachTrafficSourcesResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "AttachTrafficSourcesType":{ "type":"structure", @@ -1555,6 +1552,10 @@ "shape":"AutoScalingGroupNames", "documentation":"

    The names of the Auto Scaling groups. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords property.

    If you omit this property, all Auto Scaling groups are described.

    " }, + "IncludeInstances":{ + "shape":"IncludeInstances", + "documentation":"

    Specifies whether to include information about Amazon EC2 instances in the response. When set to true (default), the response includes instance details.

    " + }, "NextToken":{ "shape":"XmlString", "documentation":"

    The token for the next set of items to return. (You received this token from a previous call.)

    " @@ -1817,7 +1818,7 @@ }, "DeviceName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The device name assigned to the volume (for example, /dev/sdh or xvdh). For more information, see Device naming on Linux instances in the Amazon EC2 User Guide for Linux Instances.

    To define a block device mapping, set the device name and exactly one of the following properties: Ebs, NoDevice, or VirtualName.

    " + "documentation":"

    The device name assigned to the volume (for example, /dev/sdh or xvdh). For more information, see Device naming on Linux instances in the Amazon EC2 User Guide.

    To define a block device mapping, set the device name and exactly one of the following properties: Ebs, NoDevice, or VirtualName.

    " }, "Ebs":{ "shape":"Ebs", @@ -1947,8 +1948,7 @@ }, "CompleteLifecycleActionAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "CompleteLifecycleActionType":{ "type":"structure", @@ -1987,7 +1987,8 @@ "enum":[ "intel", "amd", - "amazon-web-services" + "amazon-web-services", + "apple" ] }, "CpuManufacturers":{ @@ -2071,7 +2072,7 @@ }, "PlacementGroup":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " + "documentation":"

    The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen5000", @@ -2153,11 +2154,11 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

    If you specify InstanceId, an ImageId is not required.

    " + "documentation":"

    The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide.

    If you specify InstanceId, an ImageId is not required.

    " }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -2181,19 +2182,19 @@ }, "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances.

    If you specify InstanceId, an InstanceType is not required.

    " + "documentation":"

    Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide.

    If you specify InstanceId, an InstanceType is not required.

    " }, "KernelId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the kernel associated with the AMI.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the kernel associated with the AMI.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

    " }, "RamdiskId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the RAM disk to select.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the RAM disk to select.

    We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide.

    " }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide.

    " }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", @@ -2209,7 +2210,7 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

    The default value is false.

    " + "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide.

    The default value is false.

    " }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", @@ -2286,8 +2287,7 @@ }, "DeleteLifecycleHookAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLifecycleHookType":{ "type":"structure", @@ -2366,8 +2366,7 @@ }, "DeleteWarmPoolAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWarmPoolType":{ "type":"structure", @@ -2833,8 +2832,7 @@ }, "DetachLoadBalancerTargetGroupsResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachLoadBalancerTargetGroupsType":{ "type":"structure", @@ -2855,8 +2853,7 @@ }, "DetachLoadBalancersResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachLoadBalancersType":{ "type":"structure", @@ -2877,8 +2874,7 @@ }, "DetachTrafficSourcesResultType":{ "type":"structure", - "members":{ - } + "members":{} }, "DetachTrafficSourcesType":{ "type":"structure", @@ -3180,6 +3176,7 @@ ] }, "IncludeDeletedGroups":{"type":"boolean"}, + "IncludeInstances":{"type":"boolean"}, "Instance":{ "type":"structure", "required":[ @@ -3455,7 +3452,7 @@ }, "CpuManufacturers":{ "shape":"CpuManufacturers", - "documentation":"

    Lists which specific CPU manufacturers to include.

    • For instance types with Intel CPUs, specify intel.

    • For instance types with AMD CPUs, specify amd.

    • For instance types with Amazon Web Services CPUs, specify amazon-web-services.

    Don't confuse the CPU hardware manufacturer with the CPU hardware architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template.

    Default: Any manufacturer

    " + "documentation":"

    Lists which specific CPU manufacturers to include.

    • For instance types with Intel CPUs, specify intel.

    • For instance types with AMD CPUs, specify amd.

    • For instance types with Amazon Web Services CPUs, specify amazon-web-services.

    • For instance types with Apple CPUs, specify apple.

    Don't confuse the CPU hardware manufacturer with the CPU hardware architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template.

    Default: Any manufacturer

    " }, "MemoryGiBPerVCpu":{ "shape":"MemoryGiBPerVCpuRequest", @@ -3467,7 +3464,7 @@ }, "InstanceGenerations":{ "shape":"InstanceGenerations", - "documentation":"

    Indicates whether current or previous generation instance types are included.

    • For current generation instance types, specify current. The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances.

    • For previous generation instance types, specify previous.

    Default: Any current or previous generation

    " + "documentation":"

    Indicates whether current or previous generation instance types are included.

    • For current generation instance types, specify current. The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see Instance types in the Amazon EC2 User Guide.

    • For previous generation instance types, specify previous.

    Default: Any current or previous generation

    " }, "SpotMaxPricePercentageOverLowestPrice":{ "shape":"NullablePositiveInteger", @@ -3487,7 +3484,7 @@ }, "BurstablePerformance":{ "shape":"BurstablePerformance", - "documentation":"

    Indicates whether burstable performance instance types are included, excluded, or required. For more information, see Burstable performance instances in the Amazon EC2 User Guide for Linux Instances.

    Default: excluded

    " + "documentation":"

    Indicates whether burstable performance instance types are included, excluded, or required. For more information, see Burstable performance instances in the Amazon EC2 User Guide.

    Default: excluded

    " }, "RequireHibernateSupport":{ "shape":"NullableBoolean", @@ -3499,7 +3496,7 @@ }, "LocalStorage":{ "shape":"LocalStorage", - "documentation":"

    Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see Amazon EC2 instance store in the Amazon EC2 User Guide for Linux Instances.

    Default: included

    " + "documentation":"

    Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see Amazon EC2 instance store in the Amazon EC2 User Guide.

    Default: included

    " }, "LocalStorageTypes":{ "shape":"LocalStorageTypes", @@ -3511,7 +3508,7 @@ }, "BaselineEbsBandwidthMbps":{ "shape":"BaselineEbsBandwidthMbpsRequest", - "documentation":"

    The minimum and maximum baseline bandwidth performance for an instance type, in Mbps. For more information, see Amazon EBS–optimized instances in the Amazon EC2 User Guide for Linux Instances.

    Default: No minimum or maximum limits

    " + "documentation":"

    The minimum and maximum baseline bandwidth performance for an instance type, in Mbps. For more information, see Amazon EBS–optimized instances in the Amazon EC2 User Guide.

    Default: No minimum or maximum limits

    " }, "AcceleratorTypes":{ "shape":"AcceleratorTypes", @@ -3546,7 +3543,7 @@ "documentation":"

    The baseline performance factors for the instance requirements.

    " } }, - "documentation":"

    The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

    When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

    To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

    • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

    • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

    You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

    For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.

    When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

    To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance types, you can use one of the following parameters, but not both in the same request:

    • AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.

    • ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes.

    You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

    For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. For help determining which instance types match your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the Amazon EC2 User Guide.

    " }, "InstanceReusePolicy":{ "type":"structure", @@ -3664,11 +3661,11 @@ }, "ImageId":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Find a Linux AMI in the Amazon EC2 User Guide.

    " }, "KeyName":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The name of the key pair.

    For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The name of the key pair.

    For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -3684,11 +3681,11 @@ }, "UserData":{ "shape":"XmlStringUserData", - "documentation":"

    The user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data (Linux) and Instance metadata and user data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

    " + "documentation":"

    The user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

    " }, "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The instance type for the instances. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The instance type for the instances. For information about available instance types, see Available instance types in the Amazon EC2 User Guide.

    " }, "KernelId":{ "shape":"XmlStringMaxLen255", @@ -3700,7 +3697,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappings", - "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide.

    " }, "InstanceMonitoring":{ "shape":"InstanceMonitoring", @@ -3720,7 +3717,7 @@ }, "EbsOptimized":{ "shape":"EbsOptimized", - "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide.

    " }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", @@ -3811,7 +3808,7 @@ "members":{ "InstanceType":{ "shape":"XmlStringMaxLen255", - "documentation":"

    The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances.

    You can specify up to 40 instance types per Auto Scaling group.

    " + "documentation":"

    The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide.

    You can specify up to 40 instance types per Auto Scaling group.

    " }, "WeightedCapacity":{ "shape":"XmlStringMaxLen32", @@ -4308,7 +4305,7 @@ "documentation":"

    The maximum amount of network bandwidth, in gigabits per second (Gbps).

    " } }, - "documentation":"

    Specifies the minimum and maximum for the NetworkBandwidthGbps object when you specify InstanceRequirements for an Auto Scaling group.

    Setting the minimum bandwidth does not guarantee that your instance will achieve the minimum bandwidth. Amazon EC2 will identify instance types that support the specified minimum bandwidth, but the actual bandwidth of your instance might go below the specified minimum at times. For more information, see Available instance bandwidth in the Amazon EC2 User Guide for Linux Instances.

    " + "documentation":"

    Specifies the minimum and maximum for the NetworkBandwidthGbps object when you specify InstanceRequirements for an Auto Scaling group.

    Setting the minimum bandwidth does not guarantee that your instance will achieve the minimum bandwidth. Amazon EC2 will identify instance types that support the specified minimum bandwidth, but the actual bandwidth of your instance might go below the specified minimum at times. For more information, see Available instance bandwidth in the Amazon EC2 User Guide.

    " }, "NetworkInterfaceCountRequest":{ "type":"structure", @@ -4678,8 +4675,7 @@ "ProtectedFromScaleIn":{"type":"boolean"}, "PutLifecycleHookAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "PutLifecycleHookType":{ "type":"structure", @@ -4860,8 +4856,7 @@ }, "PutWarmPoolAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "PutWarmPoolType":{ "type":"structure", @@ -4891,8 +4886,7 @@ }, "RecordLifecycleActionHeartbeatAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "RecordLifecycleActionHeartbeatType":{ "type":"structure", @@ -5368,8 +5362,7 @@ }, "SetInstanceProtectionAnswer":{ "type":"structure", - "members":{ - } + "members":{} }, "SetInstanceProtectionQuery":{ "type":"structure", @@ -5791,7 +5784,7 @@ }, "PlacementGroup":{ "shape":"UpdatePlacementGroupParam", - "documentation":"

    The name of an existing placement group into which to launch your instances. To remove the placement group setting, pass an empty string for placement-group. For more information about placement groups, see Placement groups in the Amazon EC2 User Guide for Linux Instances.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " + "documentation":"

    The name of an existing placement group into which to launch your instances. To remove the placement group setting, pass an empty string for placement-group. For more information about placement groups, see Placement groups in the Amazon EC2 User Guide.

    A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.

    " }, "VPCZoneIdentifier":{ "shape":"XmlStringMaxLen5000", @@ -5995,5 +5988,5 @@ }, "ZonalShiftEnabled":{"type":"boolean"} }, - "documentation":"Amazon EC2 Auto Scaling

    Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks.

    For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

    " + "documentation":"Amazon EC2 Auto Scaling

    The DescribeAutoScalingGroups API operation might be throttled when retrieving details for an Auto Scaling group that contains many instances. By default, this operation returns details for all instances in the group. To help prevent throttling, you can set the IncludeInstances parameter to false to exclude instance details from the response.

    Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks.

    For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

    " } diff --git a/services/autoscalingplans/pom.xml b/services/autoscalingplans/pom.xml index d3a718e94435..0613afac3023 100644 --- a/services/autoscalingplans/pom.xml +++ b/services/autoscalingplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT autoscalingplans AWS Java SDK :: Services :: Auto Scaling Plans diff --git a/services/autoscalingplans/src/main/resources/codegen-resources/customization.config b/services/autoscalingplans/src/main/resources/codegen-resources/customization.config index d11ae805d48b..eada5dbbe962 100644 --- a/services/autoscalingplans/src/main/resources/codegen-resources/customization.config +++ b/services/autoscalingplans/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "describeScalingPlans" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/b2bi/pom.xml b/services/b2bi/pom.xml index c9de6b0cfe52..cf468ee949df 100644 --- a/services/b2bi/pom.xml +++ b/services/b2bi/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT b2bi AWS Java SDK :: Services :: B2 Bi diff --git a/services/b2bi/src/main/resources/codegen-resources/customization.config b/services/b2bi/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/b2bi/src/main/resources/codegen-resources/customization.config +++ b/services/b2bi/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/b2bi/src/main/resources/codegen-resources/service-2.json b/services/b2bi/src/main/resources/codegen-resources/service-2.json index 3a8386c71f1f..1cb33248e688 100644 --- 
a/services/b2bi/src/main/resources/codegen-resources/service-2.json +++ b/services/b2bi/src/main/resources/codegen-resources/service-2.json @@ -25,8 +25,8 @@ "input":{"shape":"CreateCapabilityRequest"}, "output":{"shape":"CreateCapabilityResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, @@ -45,11 +45,11 @@ "input":{"shape":"CreatePartnershipRequest"}, "output":{"shape":"CreatePartnershipResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -65,11 +65,11 @@ "input":{"shape":"CreateProfileRequest"}, "output":{"shape":"CreateProfileResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -101,11 +101,11 @@ "input":{"shape":"CreateTransformerRequest"}, "output":{"shape":"CreateTransformerResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -120,8 +120,8 @@ }, "input":{"shape":"DeleteCapabilityRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, 
{"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, @@ -138,11 +138,11 @@ }, "input":{"shape":"DeletePartnershipRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Deletes the specified partnership. A partnership represents the connection between you and your trading partner. It ties together a profile and one or more trading capabilities.

    ", @@ -159,8 +159,8 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Deletes the specified profile. A profile is the mechanism used to create the concept of a private network.

    ", @@ -174,11 +174,11 @@ }, "input":{"shape":"DeleteTransformerRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Deletes the specified transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.

    ", @@ -198,7 +198,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Takes sample input and output documents and uses Amazon Bedrock to generate a mapping automatically. Depending on the accuracy and other factors, you can then edit the mapping for your needs.

    Before you can use the AI-assisted feature for Amazon Web Services B2B Data Interchange you must enable models in Amazon Bedrock. For details, see AI-assisted template mapping prerequisites in the Amazon Web Services B2B Data Interchange User guide.

    ", + "documentation":"

    Takes sample input and output documents and uses Amazon Bedrock to generate a mapping automatically. Depending on the accuracy and other factors, you can then edit the mapping for your needs.

    Before you can use the AI-assisted feature for Amazon Web Services B2B Data Interchange you must enable models in Amazon Bedrock. For details, see AI-assisted template mapping prerequisites in the Amazon Web Services B2B Data Interchange User guide.

    To generate a mapping, perform the following steps:

    1. Start with an X12 EDI document to use as the input.

    2. Call TestMapping using your EDI document.

    3. Use the output from the TestMapping operation as either input or output for your GenerateMapping call, along with your sample file.

    ", "idempotent":true }, "GetCapability":{ @@ -229,8 +229,8 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Retrieves the details for a partnership, based on the partner and profile IDs specified. A partnership represents the connection between you and your trading partner. It ties together a profile and one or more trading capabilities.

    " @@ -246,8 +246,8 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Retrieves the details for the profile specified by the profile ID. A profile is the mechanism used to create the concept of a private network.

    " @@ -263,8 +263,8 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Retrieves the details for the transformer specified by the transformer ID. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file.

    " @@ -284,7 +284,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Returns the details of the transformer run, based on the Transformer job ID.

    " + "documentation":"

    Returns the details of the transformer run, based on the Transformer job ID.

    If 30 days have elapsed since your transformer job was started, the system deletes it. So, if you run GetTransformerJob and supply a transformerId and transformerJobId for a job that was started more than 30 days previously, you receive a 404 response.

    " }, "ListCapabilities":{ "name":"ListCapabilities", @@ -313,8 +313,8 @@ "errors":[ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Lists the partnerships associated with your Amazon Web Services account for your current or specified region. A partnership represents the connection between you and your trading partner. It ties together a profile and one or more trading capabilities.

    " @@ -375,14 +375,14 @@ "input":{"shape":"StartTransformerJobRequest"}, "output":{"shape":"StartTransformerJobResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

    Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange.

    If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files.

    ", + "documentation":"

    Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange.

    If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files.

    The system stores transformer jobs for 30 days. During that period, you can run GetTransformerJob and supply its transformerId and transformerJobId to return details of the job.

    ", "idempotent":true }, "TagResource":{ @@ -394,8 +394,8 @@ "input":{"shape":"TagResourceRequest"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], "documentation":"

    Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are capability, partnership, profile, transformers and other entities.

    There is no response returned from this call.

    " @@ -478,8 +478,8 @@ "input":{"shape":"UpdateCapabilityRequest"}, "output":{"shape":"UpdateCapabilityResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, @@ -498,11 +498,11 @@ "input":{"shape":"UpdatePartnershipRequest"}, "output":{"shape":"UpdatePartnershipResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -518,11 +518,11 @@ "input":{"shape":"UpdateProfileRequest"}, "output":{"shape":"UpdateProfileResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -538,11 +538,11 @@ "input":{"shape":"UpdateTransformerRequest"}, "output":{"shape":"UpdateTransformerResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ], @@ -560,6 +560,16 @@ "documentation":"

    You do not have sufficient access to perform this action.

    ", "exception":true }, + "AdvancedOptions":{ + "type":"structure", + "members":{ + "x12":{ + "shape":"X12AdvancedOptions", + "documentation":"

    A structure that contains X12-specific advanced options, such as split options for processing X12 EDI files.

    " + } + }, + "documentation":"

    A structure that contains advanced options for EDI processing. Currently, only X12 advanced options are supported.

    " + }, "AmazonResourceName":{ "type":"string", "max":1011, @@ -614,6 +624,10 @@ "outboundEdi":{ "shape":"OutboundEdiOptions", "documentation":"

    A structure that contains the outbound EDI options.

    " + }, + "inboundEdi":{ + "shape":"InboundEdiOptions", + "documentation":"

    A structure that contains the inbound EDI options for the capability.

    " } }, "documentation":"

    Contains the details for an Outbound EDI capability.

    " @@ -1285,11 +1299,11 @@ "members":{ "inputFileContent":{ "shape":"GenerateMappingInputFileContent", - "documentation":"

    Provide the contents of a sample X12 EDI file (for inbound EDI) or JSON/XML file (for outbound EDI) to use as a starting point for the mapping.

    " + "documentation":"

    Provide the contents of a sample X12 EDI file, either in JSON or XML format, to use as a starting point for the mapping.

    " }, "outputFileContent":{ "shape":"GenerateMappingOutputFileContent", - "documentation":"

    Provide the contents of a sample X12 EDI file (for outbound EDI) or JSON/XML file (for inbound EDI) to use as a target for the mapping.

    " + "documentation":"

    Provide the contents of a sample X12 EDI file, either in JSON or XML format, to use as a target for the mapping.

    " }, "mappingType":{ "shape":"MappingType", @@ -1618,6 +1632,16 @@ } } }, + "InboundEdiOptions":{ + "type":"structure", + "members":{ + "x12":{ + "shape":"X12InboundEdiOptions", + "documentation":"

    A structure that contains X12-specific options for processing inbound X12 EDI files.

    " + } + }, + "documentation":"

    Contains options for processing inbound EDI files. These options allow for customizing how incoming EDI documents are processed.

    " + }, "InputConversion":{ "type":"structure", "required":["fromFormat"], @@ -1629,6 +1653,10 @@ "formatOptions":{ "shape":"FormatOptions", "documentation":"

    A structure that contains the formatting options for an inbound transformer.

    " + }, + "advancedOptions":{ + "shape":"AdvancedOptions", + "documentation":"

    Specifies advanced options for the input conversion process. These options provide additional control over how EDI files are processed during transformation.

    " } }, "documentation":"

    Contains the input formatting options for an inbound transformer (takes an X12-formatted EDI document as input and converts it to JSON or XML).

    " @@ -1678,6 +1706,19 @@ "type":"list", "member":{"shape":"SampleDocumentKeys"} }, + "LineLength":{ + "type":"integer", + "box":true, + "min":1 + }, + "LineTerminator":{ + "type":"string", + "enum":[ + "CRLF", + "LF", + "CR" + ] + }, "ListCapabilitiesRequest":{ "type":"structure", "members":{ @@ -1904,6 +1945,10 @@ "max":2048, "min":1 }, + "ParsedSplitFileContentsList":{ + "type":"list", + "member":{"shape":"String"} + }, "PartnerName":{ "type":"string", "max":254, @@ -2162,6 +2207,24 @@ } } }, + "StartingFunctionalGroupControlNumber":{ + "type":"integer", + "box":true, + "max":999999999, + "min":1 + }, + "StartingInterchangeControlNumber":{ + "type":"integer", + "box":true, + "max":999999999, + "min":1 + }, + "StartingTransactionSetControlNumber":{ + "type":"integer", + "box":true, + "max":999999999, + "min":1 + }, "String":{"type":"string"}, "Tag":{ "type":"structure", @@ -2315,6 +2378,10 @@ "ediType":{ "shape":"EdiType", "documentation":"

    Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

    " + }, + "advancedOptions":{ + "shape":"AdvancedOptions", + "documentation":"

    Specifies advanced options for parsing the input EDI file. These options allow for more granular control over the parsing process, including split options for X12 files.

    " } } }, @@ -2325,6 +2392,10 @@ "parsedFileContent":{ "shape":"String", "documentation":"

    Returns the contents of the input file being tested, parsed according to the specified EDI (electronic data interchange) type.

    " + }, + "parsedSplitFileContents":{ + "shape":"ParsedSplitFileContentsList", + "documentation":"

    Returns an array of parsed file contents when the input file is split according to the specified split options. Each element in the array represents a separate split file's parsed content.

    " } } }, @@ -2849,12 +2920,67 @@ "type":"list", "member":{"shape":"String"} }, + "WrapFormat":{ + "type":"string", + "enum":[ + "SEGMENT", + "ONE_LINE", + "LINE_LENGTH" + ] + }, + "WrapOptions":{ + "type":"structure", + "required":["wrapBy"], + "members":{ + "wrapBy":{ + "shape":"WrapFormat", + "documentation":"

    Specifies the method used for wrapping lines in the EDI output. Valid values:

    • SEGMENT: Wraps by segment.

    • ONE_LINE: Indicates that the entire content is on a single line.

      When you specify ONE_LINE, do not provide either the line length or the line terminator value.

    • LINE_LENGTH: Wraps by character count, as specified by lineLength value.

    " + }, + "lineTerminator":{ + "shape":"LineTerminator", + "documentation":"

    Specifies the character sequence used to terminate lines when wrapping. Valid values:

    • CRLF: carriage return and line feed

    • LF: line feed

    • CR: carriage return

    " + }, + "lineLength":{ + "shape":"LineLength", + "documentation":"

    Specifies the maximum length of a line before wrapping occurs. This value is used when wrapBy is set to LINE_LENGTH.

    " + } + }, + "documentation":"

    Contains options for wrapping (line folding) in X12 EDI files. Wrapping controls how long lines are handled in the EDI output.

    " + }, + "X12AcknowledgmentOptions":{ + "type":"structure", + "required":[ + "functionalAcknowledgment", + "technicalAcknowledgment" + ], + "members":{ + "functionalAcknowledgment":{ + "shape":"X12FunctionalAcknowledgment", + "documentation":"

    Specifies whether functional acknowledgments (997/999) should be generated for incoming X12 transactions. Valid values are DO_NOT_GENERATE, GENERATE_ALL_SEGMENTS and GENERATE_WITHOUT_TRANSACTION_SET_RESPONSE_LOOP.

    If you choose GENERATE_WITHOUT_TRANSACTION_SET_RESPONSE_LOOP, Amazon Web Services B2B Data Interchange skips the AK2_Loop when generating an acknowledgment document.

    " + }, + "technicalAcknowledgment":{ + "shape":"X12TechnicalAcknowledgment", + "documentation":"

    Specifies whether technical acknowledgments (TA1) should be generated for incoming X12 interchanges. Valid values are DO_NOT_GENERATE and GENERATE_ALL_SEGMENTS.

    " + } + }, + "documentation":"

    Contains options for configuring X12 acknowledgments. These options control how functional and technical acknowledgments are handled.

    " + }, "X12AcknowledgmentRequestedCode":{ "type":"string", "max":1, "min":1, "pattern":"[a-zA-Z0-9]*" }, + "X12AdvancedOptions":{ + "type":"structure", + "members":{ + "splitOptions":{ + "shape":"X12SplitOptions", + "documentation":"

    Specifies options for splitting X12 EDI files. These options control how large X12 files are divided into smaller, more manageable units.

    " + } + }, + "documentation":"

    Contains advanced options specific to X12 EDI processing, such as splitting large X12 files into smaller units.

    " + }, "X12ApplicationReceiverCode":{ "type":"string", "max":15, @@ -2873,6 +2999,24 @@ "min":1, "pattern":"[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]" }, + "X12ControlNumbers":{ + "type":"structure", + "members":{ + "startingInterchangeControlNumber":{ + "shape":"StartingInterchangeControlNumber", + "documentation":"

    Specifies the starting interchange control number (ISA13) to use for X12 EDI generation. This number is incremented for each new interchange. For the ISA (interchange) envelope, Amazon Web Services B2B Data Interchange generates an interchange control number that is unique for the ISA05 and ISA06 (sender) & ISA07 and ISA08 (receiver) combination.

    " + }, + "startingFunctionalGroupControlNumber":{ + "shape":"StartingFunctionalGroupControlNumber", + "documentation":"

    Specifies the starting functional group control number (GS06) to use for X12 EDI generation. This number is incremented for each new functional group. For the GS (functional group) envelope, Amazon Web Services B2B Data Interchange generates a functional group control number that is unique to the sender ID, receiver ID, and functional identifier code combination.

    " + }, + "startingTransactionSetControlNumber":{ + "shape":"StartingTransactionSetControlNumber", + "documentation":"

    Specifies the starting transaction set control number (ST02) to use for X12 EDI generation. This number is incremented for each new transaction set.

    " + } + }, + "documentation":"

    Contains configuration for X12 control numbers used in X12 EDI generation. Control numbers are used to uniquely identify interchanges, functional groups, and transaction sets.

    " + }, "X12DataElementSeparator":{ "type":"string", "max":1, @@ -2917,10 +3061,19 @@ "common":{ "shape":"X12OutboundEdiHeaders", "documentation":"

    A container for the X12 outbound EDI headers.

    " - } + }, + "wrapOptions":{"shape":"WrapOptions"} }, "documentation":"

    A wrapper structure for an X12 definition object.

    the X12 envelope ensures the integrity of the data and the efficiency of the information exchange. The X12 message structure has hierarchical levels. From highest to the lowest, they are:

    • Interchange Envelope

    • Functional Group

    • Transaction Set

    " }, + "X12FunctionalAcknowledgment":{ + "type":"string", + "enum":[ + "DO_NOT_GENERATE", + "GENERATE_ALL_SEGMENTS", + "GENERATE_WITHOUT_TRANSACTION_SET_RESPONSE_LOOP" + ] + }, "X12FunctionalGroupHeaders":{ "type":"structure", "members":{ @@ -2939,12 +3092,31 @@ }, "documentation":"

    Part of the X12 message structure. These are the functional group headers for the X12 EDI object.

    " }, + "X12GS05TimeFormat":{ + "type":"string", + "documentation":"

    Specifies the time format in the GS05 element (time) of the functional group header. The following formats use 24-hour clock time:

    • HHMM - Hours and minutes

    • HHMMSS - Hours, minutes, and seconds

    • HHMMSSDD - Hours, minutes, seconds, and decimal seconds

    Where:

    • HH - Hours (00-23)

    • MM - Minutes (00-59)

    • SS - Seconds (00-59)

    • DD - Hundredths of seconds (00-99)

    ", + "enum":[ + "HHMM", + "HHMMSS", + "HHMMSSDD" + ] + }, "X12IdQualifier":{ "type":"string", "max":2, "min":2, "pattern":"[a-zA-Z0-9]*" }, + "X12InboundEdiOptions":{ + "type":"structure", + "members":{ + "acknowledgmentOptions":{ + "shape":"X12AcknowledgmentOptions", + "documentation":"

    Specifies acknowledgment options for inbound X12 EDI files. These options control how functional and technical acknowledgments are handled.

    " + } + }, + "documentation":"

    Contains options specific to processing inbound X12 EDI files.

    " + }, "X12InterchangeControlHeaders":{ "type":"structure", "members":{ @@ -2997,7 +3169,12 @@ "validateEdi":{ "shape":"X12ValidateEdi", "documentation":"

    Specifies whether or not to validate the EDI for this X12 object: TRUE or FALSE.

    " - } + }, + "controlNumbers":{ + "shape":"X12ControlNumbers", + "documentation":"

    Specifies control number configuration for outbound X12 EDI headers. These settings determine the starting values for interchange, functional group, and transaction set control numbers.

    " + }, + "gs05TimeFormat":{"shape":"X12GS05TimeFormat"} }, "documentation":"

    A structure containing the details for an outbound EDI object.

    " }, @@ -3030,6 +3207,31 @@ "min":15, "pattern":"[a-zA-Z0-9 ]*" }, + "X12SplitBy":{ + "type":"string", + "enum":[ + "NONE", + "TRANSACTION" + ] + }, + "X12SplitOptions":{ + "type":"structure", + "required":["splitBy"], + "members":{ + "splitBy":{ + "shape":"X12SplitBy", + "documentation":"

    Specifies the method used to split X12 EDI files. Valid values include TRANSACTION (split by individual transaction sets), or NONE (no splitting).

    " + } + }, + "documentation":"

    Contains options for splitting X12 EDI files into smaller units. This is useful for processing large EDI files more efficiently.

    " + }, + "X12TechnicalAcknowledgment":{ + "type":"string", + "enum":[ + "DO_NOT_GENERATE", + "GENERATE_ALL_SEGMENTS" + ] + }, "X12TransactionSet":{ "type":"string", "enum":[ diff --git a/services/b2bi/src/main/resources/codegen-resources/waiters-2.json b/services/b2bi/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..35d86981d1c0 --- /dev/null +++ b/services/b2bi/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,21 @@ +{ + "version" : 2, + "waiters" : { + "TransformerJobSucceeded" : { + "delay" : 10, + "maxAttempts" : 12, + "operation" : "GetTransformerJob", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "succeeded" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "failed" + } ] + } + } +} \ No newline at end of file diff --git a/services/backup/pom.xml b/services/backup/pom.xml index 44212d3a58e3..27a34e03bcff 100644 --- a/services/backup/pom.xml +++ b/services/backup/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT backup AWS Java SDK :: Services :: Backup diff --git a/services/backup/src/main/resources/codegen-resources/customization.config b/services/backup/src/main/resources/codegen-resources/customization.config index caaf3cba6a0a..d259e73cd1ec 100644 --- a/services/backup/src/main/resources/codegen-resources/customization.config +++ b/services/backup/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ "listProtectedResources", "listRestoreJobs" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/backup/src/main/resources/codegen-resources/paginators-1.json b/services/backup/src/main/resources/codegen-resources/paginators-1.json index ecbba647df61..ed9c2e48a7f0 100644 --- 
a/services/backup/src/main/resources/codegen-resources/paginators-1.json +++ b/services/backup/src/main/resources/codegen-resources/paginators-1.json @@ -109,6 +109,12 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListRestoreAccessBackupVaults": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "RestoreAccessBackupVaults" + }, "ListRestoreJobSummaries": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/backup/src/main/resources/codegen-resources/service-2.json b/services/backup/src/main/resources/codegen-resources/service-2.json index 4727e6f741e5..ec29dc91cfb4 100644 --- a/services/backup/src/main/resources/codegen-resources/service-2.json +++ b/services/backup/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,23 @@ "auth":["aws.auth#sigv4"] }, "operations":{ + "AssociateBackupVaultMpaApprovalTeam":{ + "name":"AssociateBackupVaultMpaApprovalTeam", + "http":{ + "method":"PUT", + "requestUri":"/backup-vaults/{backupVaultName}/mpaApprovalTeam", + "responseCode":204 + }, + "input":{"shape":"AssociateBackupVaultMpaApprovalTeamInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Associates an MPA approval team with a backup vault.

    " + }, "CancelLegalHold":{ "name":"CancelLegalHold", "http":{ @@ -158,6 +175,26 @@ "documentation":"

    Creates a report plan. A report plan is a document that contains information about the contents of the report and where Backup will deliver it.

    If you call CreateReportPlan with a plan that already exists, you receive an AlreadyExistsException exception.

    ", "idempotent":true }, + "CreateRestoreAccessBackupVault":{ + "name":"CreateRestoreAccessBackupVault", + "http":{ + "method":"PUT", + "requestUri":"/restore-access-backup-vaults" + }, + "input":{"shape":"CreateRestoreAccessBackupVaultInput"}, + "output":{"shape":"CreateRestoreAccessBackupVaultOutput"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Creates a restore access backup vault that provides temporary access to recovery points in a logically air-gapped backup vault, subject to MPA approval.

    ", + "idempotent":true + }, "CreateRestoreTestingPlan":{ "name":"CreateRestoreTestingPlan", "http":{ @@ -244,7 +281,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    Deletes the backup vault identified by its name. A vault can be deleted only if it is empty.

    " + "documentation":"

    Deletes the backup vault identified by its name. A vault can be deleted only if it is empty.

    ", + "idempotent":true }, "DeleteBackupVaultAccessPolicy":{ "name":"DeleteBackupVaultAccessPolicy", @@ -554,6 +592,23 @@ "documentation":"

    Returns metadata associated with a restore job that is specified by a job ID.

    ", "idempotent":true }, + "DisassociateBackupVaultMpaApprovalTeam":{ + "name":"DisassociateBackupVaultMpaApprovalTeam", + "http":{ + "method":"POST", + "requestUri":"/backup-vaults/{backupVaultName}/mpaApprovalTeam?delete", + "responseCode":204 + }, + "input":{"shape":"DisassociateBackupVaultMpaApprovalTeamInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Removes the association between an MPA approval team and a backup vault, disabling the MPA approval workflow for restore operations.

    " + }, "DisassociateRecoveryPoint":{ "name":"DisassociateRecoveryPoint", "http":{ @@ -1126,6 +1181,22 @@ ], "documentation":"

    Returns a list of your report plans. For detailed information about a single report plan, use DescribeReportPlan.

    " }, + "ListRestoreAccessBackupVaults":{ + "name":"ListRestoreAccessBackupVaults", + "http":{ + "method":"GET", + "requestUri":"/logically-air-gapped-backup-vaults/{backupVaultName}/restore-access-backup-vaults/" + }, + "input":{"shape":"ListRestoreAccessBackupVaultsInput"}, + "output":{"shape":"ListRestoreAccessBackupVaultsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Returns a list of restore access backup vaults associated with a specified backup vault.

    " + }, "ListRestoreJobSummaries":{ "name":"ListRestoreJobSummaries", "http":{ @@ -1218,7 +1289,7 @@ {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Returns the tags assigned to the resource, such as a target recovery point, backup plan, or backup vault.

    ", + "documentation":"

    Returns the tags assigned to the resource, such as a target recovery point, backup plan, or backup vault.

    This operation returns results depending on the resource type used in the value for resourceArn. For example, recovery points of Amazon DynamoDB with Advanced Settings have an ARN (Amazon Resource Name) that begins with arn:aws:backup. Recovery points (backups) of DynamoDB without Advanced Settings enabled have an ARN that begins with arn:aws:dynamodb.

    When this operation is called and when you include values of resourceArn that have an ARN other than arn:aws:backup, it may return one of the exceptions listed below. To prevent this exception, include only values representing resource types that are fully managed by Backup. These have an ARN that begins arn:aws:backup and they are noted in the Feature availability by resource table.

    ", "idempotent":true }, "PutBackupVaultAccessPolicy":{ @@ -1288,6 +1359,22 @@ "documentation":"

    This request allows you to send your independent self-run restore test validation results. RestoreJobId and ValidationStatus are required. Optionally, you can input a ValidationStatusMessage.

    ", "idempotent":true }, + "RevokeRestoreAccessBackupVault":{ + "name":"RevokeRestoreAccessBackupVault", + "http":{ + "method":"DELETE", + "requestUri":"/logically-air-gapped-backup-vaults/{backupVaultName}/restore-access-backup-vaults/{restoreAccessBackupVaultArn}" + }, + "input":{"shape":"RevokeRestoreAccessBackupVaultInput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"MissingParameterValueException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    Revokes access to a restore access backup vault, removing the ability to restore from its recovery points and permanently deleting the vault.

    " + }, "StartBackupJob":{ "name":"StartBackupJob", "http":{ @@ -1375,7 +1462,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Attempts to cancel a job to create a one-time backup of a resource.

    This action is not supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for Lustre, Amazon FSx for NetApp ONTAP, Amazon FSx for OpenZFS, Amazon DocumentDB (with MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune.

    " + "documentation":"

    Attempts to cancel a job to create a one-time backup of a resource.

    This action is not supported for the following services:

    • Amazon Aurora

    • Amazon DocumentDB (with MongoDB compatibility)

    • Amazon FSx for Lustre

    • Amazon FSx for NetApp ONTAP

    • Amazon FSx for OpenZFS

    • Amazon FSx for Windows File Server

    • Amazon Neptune

    • SAP HANA databases on Amazon EC2 instances

    • Amazon RDS

    " }, "TagResource":{ "name":"TagResource", @@ -1391,7 +1478,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault identified by an Amazon Resource Name (ARN).

    This API is supported for recovery points for resource types including Aurora, Amazon DocumentDB. Amazon EBS, Amazon FSx, Neptune, and Amazon RDS.

    ", + "documentation":"

    Assigns a set of key-value pairs to a resource.

    ", "idempotent":true }, "UntagResource":{ @@ -1626,6 +1713,29 @@ "documentation":"

    The required resource already exists.

    ", "exception":true }, + "AssociateBackupVaultMpaApprovalTeamInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "MpaApprovalTeamArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the backup vault to associate with the MPA approval team.

    ", + "location":"uri", + "locationName":"backupVaultName" + }, + "MpaApprovalTeamArn":{ + "shape":"ARN", + "documentation":"

    The Amazon Resource Name (ARN) of the MPA approval team to associate with the backup vault.

    " + }, + "RequesterComment":{ + "shape":"RequesterComment", + "documentation":"

    A comment provided by the requester explaining the association request.

    " + } + } + }, "BackupJob":{ "type":"structure", "members":{ @@ -1675,7 +1785,7 @@ }, "BackupSizeInBytes":{ "shape":"Long", - "documentation":"

    The size, in bytes, of a backup.

    " + "documentation":"

    The size, in bytes, of a backup (recovery point).

    This value can render differently depending on the resource type as Backup pulls in data information from other Amazon Web Services services. For example, the value returned may show a value of 0, which may differ from the anticipated value.

    The expected behavior for values by resource type are described as follows:

    • Amazon Aurora, Amazon DocumentDB, and Amazon Neptune do not have this value populate from the operation GetBackupJobStatus.

    • For Amazon DynamoDB with advanced features, this value refers to the size of the recovery point (backup).

    • Amazon EC2 and Amazon EBS show volume size (provisioned storage) returned as part of this value. Amazon EBS does not return backup size information; snapshot size will have the same value as the original resource that was backed up.

    • For Amazon EFS, this value refers to the delta bytes transferred during a backup.

    • Amazon FSx does not populate this value from the operation GetBackupJobStatus for FSx file systems.

    • An Amazon RDS instance will show as 0.

    • For virtual machines running VMware, this value is passed to Backup through an asynchronous workflow, which can mean this displayed value can under-represent the actual backup size.

    " }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -1956,7 +2066,7 @@ }, "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

    A cron expression in UTC specifying when Backup initiates a backup job. For more information about Amazon Web Services cron expressions, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.. Two examples of Amazon Web Services cron expressions are 15 * ? * * * (take a backup every hour at 15 minutes past the hour) and 0 12 * * ? * (take a backup every day at 12 noon UTC). For a table of examples, click the preceding link and scroll down the page.

    " + "documentation":"

    A cron expression in UTC specifying when Backup initiates a backup job. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    For more information about Amazon Web Services cron expressions, see Schedule Expressions for Rules in the Amazon CloudWatch Events User Guide.

    Two examples of Amazon Web Services cron expressions are 15 * ? * * * (take a backup every hour at 15 minutes past the hour) and 0 12 * * ? * (take a backup every day at 12 noon UTC).

    For a table of examples, click the preceding link and scroll down the page.

    " }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -2014,7 +2124,7 @@ }, "ScheduleExpression":{ "shape":"CronExpression", - "documentation":"

    A CRON expression in UTC specifying when Backup initiates a backup job.

    " + "documentation":"

    A CRON expression in UTC specifying when Backup initiates a backup job. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -2154,7 +2264,11 @@ "BACKUP_PLAN_CREATED", "BACKUP_PLAN_MODIFIED", "S3_BACKUP_OBJECT_FAILED", - "S3_RESTORE_OBJECT_FAILED" + "S3_RESTORE_OBJECT_FAILED", + "CONTINUOUS_BACKUP_INTERRUPTED", + "RECOVERY_POINT_INDEX_COMPLETED", + "RECOVERY_POINT_INDEX_DELETED", + "RECOVERY_POINT_INDEXING_FAILED" ] }, "BackupVaultEvents":{ @@ -2267,8 +2381,7 @@ }, "CancelLegalHoldOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "ComplianceResourceIdList":{ "type":"list", @@ -2597,7 +2710,8 @@ }, "CreatorRequestId":{ "shape":"string", - "documentation":"

    Identifies the request and allows failed requests to be retried without the risk of running the operation twice. If the request includes a CreatorRequestId that matches an existing backup plan, that plan is returned. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    " + "documentation":"

    Identifies the request and allows failed requests to be retried without the risk of running the operation twice. If the request includes a CreatorRequestId that matches an existing backup plan, that plan is returned. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    ", + "idempotencyToken":true } } }, @@ -2645,7 +2759,8 @@ }, "CreatorRequestId":{ "shape":"string", - "documentation":"

    A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    " + "documentation":"

    A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    ", + "idempotencyToken":true } } }, @@ -2686,7 +2801,8 @@ }, "CreatorRequestId":{ "shape":"string", - "documentation":"

    A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    " + "documentation":"

    A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. This parameter is optional.

    If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    ", + "idempotencyToken":true } } }, @@ -2767,7 +2883,8 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

    This is a user-chosen string used to distinguish between otherwise identical calls. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    " + "documentation":"

    This is a user-chosen string used to distinguish between otherwise identical calls. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    ", + "idempotencyToken":true }, "RecoveryPointSelection":{ "shape":"RecoveryPointSelection", @@ -2832,7 +2949,8 @@ }, "CreatorRequestId":{ "shape":"string", - "documentation":"

    The ID of the creation request.

    This parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    " + "documentation":"

    The ID of the creation request.

    This parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.

    ", + "idempotencyToken":true }, "MinRetentionDays":{ "shape":"Long", @@ -2917,6 +3035,54 @@ } } }, + "CreateRestoreAccessBackupVaultInput":{ + "type":"structure", + "required":["SourceBackupVaultArn"], + "members":{ + "SourceBackupVaultArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the source backup vault containing the recovery points to which temporary access is requested.

    " + }, + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the backup vault to associate with an MPA approval team.

    " + }, + "BackupVaultTags":{ + "shape":"Tags", + "documentation":"

    Optional tags to assign to the restore access backup vault.

    " + }, + "CreatorRequestId":{ + "shape":"string", + "documentation":"

    A unique string that identifies the request and allows failed requests to be retried without the risk of executing the operation twice.

    ", + "idempotencyToken":true + }, + "RequesterComment":{ + "shape":"RequesterComment", + "documentation":"

    A comment explaining the reason for requesting restore access to the backup vault.

    " + } + } + }, + "CreateRestoreAccessBackupVaultOutput":{ + "type":"structure", + "members":{ + "RestoreAccessBackupVaultArn":{ + "shape":"ARN", + "documentation":"

    The ARN that uniquely identifies the created restore access backup vault.

    " + }, + "VaultState":{ + "shape":"VaultState", + "documentation":"

    The current state of the restore access backup vault.

    " + }, + "RestoreAccessBackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the created restore access backup vault.

    " + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the restore access backup vault was created, in Unix format and Coordinated Universal Time (UTC).

    " + } + } + }, "CreateRestoreTestingPlanInput":{ "type":"structure", "required":["RestoreTestingPlan"], @@ -3285,7 +3451,7 @@ }, "BackupSizeInBytes":{ "shape":"Long", - "documentation":"

    The size, in bytes, of a backup.

    " + "documentation":"

    The size, in bytes, of a backup (recovery point).

    This value can render differently depending on the resource type as Backup pulls in data information from other Amazon Web Services services. For example, the value returned may show a value of 0, which may differ from the anticipated value.

    The expected behavior for values by resource type are described as follows:

    • Amazon Aurora, Amazon DocumentDB, and Amazon Neptune do not have this value populate from the operation GetBackupJobStatus.

    • For Amazon DynamoDB with advanced features, this value refers to the size of the recovery point (backup).

    • Amazon EC2 and Amazon EBS show volume size (provisioned storage) returned as part of this value. Amazon EBS does not return backup size information; snapshot size will have the same value as the original resource that was backed up.

    • For Amazon EFS, this value refers to the delta bytes transferred during a backup.

    • Amazon FSx does not populate this value from the operation GetBackupJobStatus for FSx file systems.

    • An Amazon RDS instance will show as 0.

    • For virtual machines running VMware, this value is passed to Backup through an asynchronous workflow, which can mean this displayed value can under-represent the actual backup size.

    " }, "IamRoleArn":{ "shape":"IAMRoleArn", @@ -3400,7 +3566,7 @@ }, "NumberOfRecoveryPoints":{ "shape":"long", - "documentation":"

    The number of recovery points that are stored in a backup vault.

    " + "documentation":"

    The number of recovery points that are stored in a backup vault.

    Recovery point count value displayed in the console can be an approximation. Use ListRecoveryPointsByBackupVault API to obtain the exact count.

    " }, "Locked":{ "shape":"Boolean", @@ -3417,6 +3583,22 @@ "LockDate":{ "shape":"timestamp", "documentation":"

    The date and time when Backup Vault Lock configuration cannot be changed or deleted.

    If you applied Vault Lock to your vault without specifying a lock date, you can change any of your Vault Lock settings, or delete Vault Lock from the vault entirely, at any time.

    This value is in Unix format, Coordinated Universal Time (UTC), and accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

    " + }, + "SourceBackupVaultArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the source backup vault from which this restore access backup vault was created.

    " + }, + "MpaApprovalTeamArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the MPA approval team associated with this backup vault.

    " + }, + "MpaSessionArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the MPA session associated with this backup vault.

    " + }, + "LatestMpaApprovalTeamUpdate":{ + "shape":"LatestMpaApprovalTeamUpdate", + "documentation":"

    Information about the latest update to the MPA approval team association for this backup vault.

    " } } }, @@ -3492,8 +3674,7 @@ }, "DescribeGlobalSettingsInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeGlobalSettingsOutput":{ "type":"structure", @@ -3625,7 +3806,7 @@ }, "Status":{ "shape":"RecoveryPointStatus", - "documentation":"

    A status code specifying the state of the recovery point.

    PARTIAL status indicates Backup could not create the recovery point before the backup window closed. To increase your backup plan window using the API, see UpdateBackupPlan. You can also increase your backup plan window using the Console by choosing and editing your backup plan.

    EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

    STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup. For recovery points of Amazon S3, Amazon RDS, and Amazon Aurora resources, this status occurs when the retention period of a continuous backup rule is changed.

    To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

    For SAP HANA on Amazon EC2 STOPPED status occurs due to user action, application misconfiguration, or backup failure. To ensure that future continuous backups succeed, refer to the recovery point status and check SAP HANA for details.

    " + "documentation":"

    A status code specifying the state of the recovery point. For more information, see Recovery point status in the Backup Developer Guide.

    • CREATING status indicates that an Backup job has been initiated for a resource. The backup process has started and is actively processing a backup job for the associated recovery point.

    • AVAILABLE status indicates that the backup was successfully created for the recovery point. The backup process has completed without any issues, and the recovery point is now ready for use.

    • PARTIAL status indicates a composite recovery point has one or more nested recovery points that were not in the backup.

    • EXPIRED status indicates that the recovery point has exceeded its retention period, but Backup lacks permission or is otherwise unable to delete it. To manually delete these recovery points, see Step 3: Delete the recovery points in the Clean up resources section of Getting started.

    • STOPPED status occurs on a continuous backup where a user has taken some action that causes the continuous backup to be disabled. This can be caused by the removal of permissions, turning off versioning, turning off events being sent to EventBridge, or disabling the EventBridge rules that are put in place by Backup. For recovery points of Amazon S3, Amazon RDS, and Amazon Aurora resources, this status occurs when the retention period of a continuous backup rule is changed.

      To resolve STOPPED status, ensure that all requested permissions are in place and that versioning is enabled on the S3 bucket. Once these conditions are met, the next instance of a backup rule running will result in a new continuous recovery point being created. The recovery points with STOPPED status do not need to be deleted.

      For SAP HANA on Amazon EC2 STOPPED status occurs due to user action, application misconfiguration, or backup failure. To ensure that future continuous backups succeed, refer to the recovery point status and check SAP HANA for details.

    " }, "StatusMessage":{ "shape":"string", @@ -3635,6 +3816,10 @@ "shape":"timestamp", "documentation":"

    The date and time that a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

    " }, + "InitiationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the backup job that created this recovery point was initiated, in Unix format and Coordinated Universal Time (UTC).

    " + }, "CompletionDate":{ "shape":"timestamp", "documentation":"

    The date and time that a job to create a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

    " @@ -3699,8 +3884,7 @@ }, "DescribeRegionSettingsInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRegionSettingsOutput":{ "type":"structure", @@ -3850,6 +4034,22 @@ } } }, + "DisassociateBackupVaultMpaApprovalTeamInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the backup vault from which to disassociate the MPA approval team.

    ", + "location":"uri", + "locationName":"backupVaultName" + }, + "RequesterComment":{ + "shape":"RequesterComment", + "documentation":"

    An optional comment explaining the reason for disassociating the MPA approval team from the backup vault.

    " + } + } + }, "DisassociateRecoveryPointFromParentInput":{ "type":"structure", "required":[ @@ -4643,6 +4843,58 @@ "type":"list", "member":{"shape":"KeyValue"} }, + "LatestMpaApprovalTeamUpdate":{ + "type":"structure", + "members":{ + "MpaSessionArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the MPA session associated with this update.

    " + }, + "Status":{ + "shape":"MpaSessionStatus", + "documentation":"

    The current status of the MPA approval team update.

    " + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

    A message describing the current status of the MPA approval team update.

    " + }, + "InitiationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the MPA approval team update was initiated.

    " + }, + "ExpiryDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the MPA approval team update will expire.

    " + } + }, + "documentation":"

    Contains information about the latest update to an MPA approval team association.

    " + }, + "LatestRevokeRequest":{ + "type":"structure", + "members":{ + "MpaSessionArn":{ + "shape":"string", + "documentation":"

    The ARN of the MPA session associated with this revoke request.

    " + }, + "Status":{ + "shape":"MpaRevokeSessionStatus", + "documentation":"

    The current status of the revoke request.

    " + }, + "StatusMessage":{ + "shape":"string", + "documentation":"

    A message describing the current status of the revoke request.

    " + }, + "InitiationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the revoke request was initiated.

    " + }, + "ExpiryDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the revoke request will expire.

    " + } + }, + "documentation":"

    Contains information about the latest request to revoke access to a backup vault.

    " + }, "LegalHold":{ "type":"structure", "members":{ @@ -5667,6 +5919,43 @@ } } }, + "ListRestoreAccessBackupVaultsInput":{ + "type":"structure", + "required":["BackupVaultName"], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the backup vault for which to list associated restore access backup vaults.

    ", + "location":"uri", + "locationName":"backupVaultName" + }, + "NextToken":{ + "shape":"string", + "documentation":"

    The pagination token from a previous request to retrieve the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of items to return in the response.

    ", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListRestoreAccessBackupVaultsOutput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

    The pagination token to use in a subsequent request to retrieve the next set of results.

    " + }, + "RestoreAccessBackupVaults":{ + "shape":"RestoreAccessBackupVaultList", + "documentation":"

    A list of restore access backup vaults associated with the specified backup vault.

    " + } + } + }, "ListRestoreJobSummariesInput":{ "type":"structure", "members":{ @@ -6013,6 +6302,21 @@ "documentation":"

    Indicates that a required parameter is missing.

    ", "exception":true }, + "MpaRevokeSessionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "FAILED" + ] + }, + "MpaSessionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "APPROVED", + "FAILED" + ] + }, "ParameterName":{"type":"string"}, "ParameterValue":{"type":"string"}, "ProtectedResource":{ @@ -6123,7 +6427,7 @@ }, "BackupVaultEvents":{ "shape":"BackupVaultEvents", - "documentation":"

    An array of events that indicate the status of jobs to back up resources to the backup vault.

    For common use cases and code samples, see Using Amazon SNS to track Backup events.

    The following events are supported:

    • BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED

    • COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED

    • RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED

    • S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED

    The list below includes both supported events and deprecated events that are no longer in use (for reference). Deprecated events do not return statuses or notifications. Refer to the list above for the supported events.

    " + "documentation":"

    An array of events that indicate the status of jobs to back up resources to the backup vault. For the list of supported events, common use cases, and code samples, see Notification options with Backup.

    " } } }, @@ -6197,6 +6501,10 @@ "shape":"timestamp", "documentation":"

    The date and time a recovery point is created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

    " }, + "InitiationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the backup job that created this recovery point was initiated, in Unix format and Coordinated Universal Time (UTC).

    " + }, "CompletionDate":{ "shape":"timestamp", "documentation":"

    The date and time a job to restore a recovery point is completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

    " @@ -6387,7 +6695,10 @@ "COMPLETED", "PARTIAL", "DELETING", - "EXPIRED" + "EXPIRED", + "AVAILABLE", + "STOPPED", + "CREATING" ] }, "RecoveryPointsList":{ @@ -6560,6 +6871,10 @@ }, "documentation":"

    Contains detailed information about a report setting.

    " }, + "RequesterComment":{ + "type":"string", + "sensitive":true + }, "ResourceArns":{ "type":"list", "member":{"shape":"ARN"} @@ -6607,6 +6922,36 @@ "type":"list", "member":{"shape":"ResourceType"} }, + "RestoreAccessBackupVaultList":{ + "type":"list", + "member":{"shape":"RestoreAccessBackupVaultListMember"} + }, + "RestoreAccessBackupVaultListMember":{ + "type":"structure", + "members":{ + "RestoreAccessBackupVaultArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the restore access backup vault.

    " + }, + "CreationDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the restore access backup vault was created.

    " + }, + "ApprovalDate":{ + "shape":"timestamp", + "documentation":"

    The date and time when the restore access backup vault was approved.

    " + }, + "VaultState":{ + "shape":"VaultState", + "documentation":"

    The current state of the restore access backup vault.

    " + }, + "LatestRevokeRequest":{ + "shape":"LatestRevokeRequest", + "documentation":"

    Information about the latest request to revoke access to this backup vault.

    " + } + }, + "documentation":"

    Contains information about a restore access backup vault.

    " + }, "RestoreDeletionStatus":{ "type":"string", "enum":[ @@ -6791,7 +7136,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6844,7 +7189,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6888,7 +7233,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -6910,7 +7255,7 @@ }, "ScheduleExpression":{ "shape":"String", - "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed.

    " + "documentation":"

    A CRON expression in specified timezone when a restore testing plan is executed. When no CRON expression is provided, Backup will use the default expression cron(0 5 ? * * *).

    " }, "ScheduleExpressionTimezone":{ "shape":"String", @@ -7005,7 +7350,7 @@ }, "ValidationWindowHours":{ "shape":"integer", - "documentation":"

    This is amount of hours (1 to 168) available to run a validation script on the data. The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first.

    " + "documentation":"

    This is the amount of hours (0 to 168) available to run a validation script on the data. The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first.

    " } }, "documentation":"

    This contains metadata about a specific restore testing selection.

    ProtectedResourceType is required, such as Amazon EBS or Amazon EC2.

    This consists of RestoreTestingSelectionName, ProtectedResourceType, and one of the following:

    • ProtectedResourceArns

    • ProtectedResourceConditions

    Each protected resource type can have one single value.

    A restore testing selection can include a wildcard value (\"*\") for ProtectedResourceArns along with ProtectedResourceConditions. Alternatively, you can include up to 30 specific protected resource ARNs in ProtectedResourceArns.

    ProtectedResourceConditions examples include as StringEquals and StringNotEquals.

    " @@ -7139,6 +7484,33 @@ "VALIDATING" ] }, + "RevokeRestoreAccessBackupVaultInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RestoreAccessBackupVaultArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

    The name of the source backup vault associated with the restore access backup vault to be revoked.

    ", + "location":"uri", + "locationName":"backupVaultName" + }, + "RestoreAccessBackupVaultArn":{ + "shape":"ARN", + "documentation":"

    The ARN of the restore access backup vault to revoke.

    ", + "location":"uri", + "locationName":"restoreAccessBackupVaultArn" + }, + "RequesterComment":{ + "shape":"RequesterComment", + "documentation":"

    A comment explaining the reason for revoking access to the restore access backup vault.

    ", + "location":"querystring", + "locationName":"requesterComment" + } + } + }, "SensitiveStringMap":{ "type":"map", "key":{"shape":"String"}, @@ -7185,7 +7557,8 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartBackupJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    " + "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartBackupJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    ", + "idempotencyToken":true }, "StartWindowMinutes":{ "shape":"WindowMinutes", @@ -7261,7 +7634,8 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartCopyJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    " + "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartCopyJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    ", + "idempotencyToken":true }, "Lifecycle":{"shape":"Lifecycle"} } @@ -7330,7 +7704,8 @@ }, "IdempotencyToken":{ "shape":"string", - "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartRestoreJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    " + "documentation":"

    A customer-chosen string that you can use to distinguish between otherwise identical calls to StartRestoreJob. Retrying a successful request with the same idempotency token results in a success message with no action taken.

    ", + "idempotencyToken":true }, "ResourceType":{ "shape":"ResourceType", @@ -7387,7 +7762,7 @@ "members":{ "ResourceArn":{ "shape":"ARN", - "documentation":"

    An ARN that uniquely identifies a resource. The format of the ARN depends on the type of the tagged resource.

    ARNs that do not include backup are incompatible with tagging. TagResource and UntagResource with invalid ARNs will result in an error. Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look like arn:aws:ec2:us-east.

    ", + "documentation":"

    The ARN that uniquely identifies the resource.

    ", "location":"uri", "locationName":"resourceArn" }, @@ -7796,7 +8171,8 @@ "type":"string", "enum":[ "BACKUP_VAULT", - "LOGICALLY_AIR_GAPPED_BACKUP_VAULT" + "LOGICALLY_AIR_GAPPED_BACKUP_VAULT", + "RESTORE_ACCESS_BACKUP_VAULT" ] }, "WindowMinutes":{"type":"long"}, diff --git a/services/backupgateway/pom.xml b/services/backupgateway/pom.xml index 1797e6514e45..ab2039d0c89e 100644 --- a/services/backupgateway/pom.xml +++ b/services/backupgateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT backupgateway AWS Java SDK :: Services :: Backup Gateway diff --git a/services/backupgateway/src/main/resources/codegen-resources/customization.config b/services/backupgateway/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/backupgateway/src/main/resources/codegen-resources/customization.config +++ b/services/backupgateway/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/backupsearch/pom.xml b/services/backupsearch/pom.xml index eb594cda15a0..ca942b97fbcc 100644 --- a/services/backupsearch/pom.xml +++ b/services/backupsearch/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT backupsearch AWS Java SDK :: Services :: Backup Search diff --git a/services/backupsearch/src/main/resources/codegen-resources/customization.config b/services/backupsearch/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/backupsearch/src/main/resources/codegen-resources/customization.config +++ b/services/backupsearch/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/batch/pom.xml b/services/batch/pom.xml index 
3063052c4a27..f1299b4fdf91 100644 --- a/services/batch/pom.xml +++ b/services/batch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT batch AWS Java SDK :: Services :: AWS Batch diff --git a/services/batch/src/main/resources/codegen-resources/customization.config b/services/batch/src/main/resources/codegen-resources/customization.config index c3019f1e3000..2a5ae03cf740 100644 --- a/services/batch/src/main/resources/codegen-resources/customization.config +++ b/services/batch/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "excludedSimpleMethods": [ "listJobs" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/batch/src/main/resources/codegen-resources/service-2.json b/services/batch/src/main/resources/codegen-resources/service-2.json index b2102f5b196f..1924881240fe 100644 --- a/services/batch/src/main/resources/codegen-resources/service-2.json +++ b/services/batch/src/main/resources/codegen-resources/service-2.json @@ -687,8 +687,7 @@ }, "CancelJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ClientException":{ "type":"structure", @@ -1504,8 +1503,7 @@ }, "DeleteComputeEnvironmentResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConsumableResourceRequest":{ "type":"structure", @@ -1519,8 +1517,7 @@ }, "DeleteConsumableResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteJobQueueRequest":{ "type":"structure", @@ -1535,8 +1532,7 @@ }, "DeleteJobQueueResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSchedulingPolicyRequest":{ "type":"structure", @@ -1551,8 +1547,7 @@ }, "DeleteSchedulingPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterJobDefinitionRequest":{ "type":"structure", @@ -1566,8 +1561,7 @@ }, "DeregisterJobDefinitionResponse":{ "type":"structure", - 
"members":{ - } + "members":{} }, "DescribeComputeEnvironmentsRequest":{ "type":"structure", @@ -1857,7 +1851,7 @@ "members":{ "imageType":{ "shape":"ImageType", - "documentation":"

    The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

    ECS

    If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

    ECS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    ECS_AL2_NVIDIA

    Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    ECS_AL2023

    Amazon Linux 2023: Batch supports Amazon Linux 2023.

    Amazon Linux 2023 does not support A1 instances.

    ECS_AL1

    Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

    EKS

    If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

    EKS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    EKS_AL2_NVIDIA

    Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    " + "documentation":"

    The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

    ECS

    If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

    ECS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    ECS_AL2_NVIDIA

    Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    ECS_AL2023

    Amazon Linux 2023: Batch supports Amazon Linux 2023.

    Amazon Linux 2023 does not support A1 instances.

    ECS_AL1

    Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

    EKS

    If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

    EKS_AL2

    Amazon Linux 2: Default for all non-GPU instance families.

    EKS_AL2_NVIDIA

    Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

    EKS_AL2023

    Amazon Linux 2023: Batch supports Amazon Linux 2023.

    Amazon Linux 2023 does not support A1 instances.

    EKS_AL2023_NVIDIA

    Amazon Linux 2023 (accelerated): GPU instance families and can be used for all non Amazon Web Services Graviton-based instance types.

    " }, "imageIdOverride":{ "shape":"ImageIdOverride", @@ -3282,6 +3276,10 @@ "overrides":{ "shape":"LaunchTemplateSpecificationOverrideList", "documentation":"

    A launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.

    You can specify up to ten (10) launch template overrides that are associated to unique instance types or families for each compute environment.

    To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides parameter when submitting the UpdateComputeEnvironment API operation.

    " + }, + "userdataType":{ + "shape":"UserdataType", + "documentation":"

    The EKS node initialization process to use. You only need to specify this value if you are using a custom AMI. The default value is EKS_BOOTSTRAP_SH. If imageType is a custom AMI based on EKS_AL2023 or EKS_AL2023_NVIDIA then you must choose EKS_NODEADM.

    " } }, "documentation":"

    An object that represents a launch template that's associated with a compute resource. You must specify either the launch template ID or launch template name in the request, but not both.

    If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

    This object isn't applicable to jobs that are running on Fargate resources.

    " @@ -3304,6 +3302,10 @@ "targetInstanceTypes":{ "shape":"StringList", "documentation":"

    The instance type or family that this override launch template should be applied to.

    This parameter is required when defining a launch template override.

    Information included in this parameter must meet the following requirements:

    • Must be a valid Amazon EC2 instance type or family.

    • optimal isn't allowed.

    • targetInstanceTypes can target only instance types and families that are included within the ComputeResource.instanceTypes set. targetInstanceTypes doesn't need to include all of the instances from the instanceType set, but at least a subset. For example, if ComputeResource.instanceTypes includes [m5, g5], targetInstanceTypes can include [m5.2xlarge] and [m5.large] but not [c5.large].

    • targetInstanceTypes included within the same launch template override or across launch template overrides can't overlap for the same compute environment. For example, you can't define one launch template override to target an instance family and another define an instance type within this same family.

    " + }, + "userdataType":{ + "shape":"UserdataType", + "documentation":"

    The EKS node initialization process to use. You only need to specify this value if you are using a custom AMI. The default value is EKS_BOOTSTRAP_SH. If imageType is a custom AMI based on EKS_AL2023 or EKS_AL2023_NVIDIA then you must choose EKS_NODEADM.

    " } }, "documentation":"

    An object that represents a launch template to use in place of the default launch template. You must specify either the launch template ID or launch template name in the request, but not both.

    If security groups are specified using both the securityGroupIds parameter of CreateComputeEnvironment and the launch template, the values in the securityGroupIds parameter of CreateComputeEnvironment will be used.

    You can define up to ten (10) overrides for each compute environment.

    This object isn't applicable to jobs that are running on Fargate resources.

    To unset all override templates for a compute environment, you can pass an empty array to the UpdateComputeEnvironment.overrides parameter, or not include the overrides parameter when submitting the UpdateComputeEnvironment API operation.

    " @@ -4270,8 +4272,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4524,8 +4525,7 @@ }, "TerminateJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Tmpfs":{ "type":"structure", @@ -4604,8 +4604,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateComputeEnvironmentRequest":{ "type":"structure", @@ -4748,7 +4747,7 @@ "members":{ "terminateJobsOnUpdate":{ "shape":"Boolean", - "documentation":"

    Specifies whether jobs are automatically terminated when the computer environment infrastructure is updated. The default value is false.

    " + "documentation":"

    Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. The default value is false.

    " }, "jobExecutionTimeoutMinutes":{ "shape":"JobExecutionTimeoutMinutes", @@ -4774,8 +4773,14 @@ }, "UpdateSchedulingPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} + }, + "UserdataType":{ + "type":"string", + "enum":[ + "EKS_BOOTSTRAP_SH", + "EKS_NODEADM" + ] }, "Volume":{ "type":"structure", diff --git a/services/bcmdataexports/pom.xml b/services/bcmdataexports/pom.xml index 21e09b3ef44e..5337dd069563 100644 --- a/services/bcmdataexports/pom.xml +++ b/services/bcmdataexports/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bcmdataexports AWS Java SDK :: Services :: BCM Data Exports diff --git a/services/bcmdataexports/src/main/resources/codegen-resources/customization.config b/services/bcmdataexports/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/bcmdataexports/src/main/resources/codegen-resources/customization.config +++ b/services/bcmdataexports/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/bcmpricingcalculator/pom.xml b/services/bcmpricingcalculator/pom.xml index e936c2c91ba5..d75d6a09cba8 100644 --- a/services/bcmpricingcalculator/pom.xml +++ b/services/bcmpricingcalculator/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bcmpricingcalculator AWS Java SDK :: Services :: BCM Pricing Calculator diff --git a/services/bcmpricingcalculator/src/main/resources/codegen-resources/customization.config b/services/bcmpricingcalculator/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/bcmpricingcalculator/src/main/resources/codegen-resources/customization.config +++ 
b/services/bcmpricingcalculator/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json b/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json index e78df9022ce4..79d37d66ff91 100644 --- a/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json +++ b/services/bcmpricingcalculator/src/main/resources/codegen-resources/service-2.json @@ -3182,7 +3182,7 @@ "documentation":"

    A token to retrieve the next page of results.

    " }, "maxResults":{ - "shape":"MaxResults", + "shape":"WorkloadEstimateUsageMaxResults", "documentation":"

    The maximum number of results to return per page.

    " } } @@ -3286,7 +3286,8 @@ "MaxResults":{ "type":"integer", "box":true, - "max":25 + "max":25, + "min":1 }, "NegateReservedInstanceAction":{ "type":"structure", @@ -3331,13 +3332,14 @@ "type":"string", "enum":[ "BEFORE_DISCOUNTS", - "AFTER_DISCOUNTS" + "AFTER_DISCOUNTS", + "AFTER_DISCOUNTS_AND_COMMITMENTS" ] }, "RateTypes":{ "type":"list", "member":{"shape":"RateType"}, - "max":2, + "max":3, "min":1 }, "ReservedInstanceInstanceCount":{ @@ -3869,7 +3871,8 @@ "type":"string", "enum":[ "BEFORE_DISCOUNTS", - "AFTER_DISCOUNTS" + "AFTER_DISCOUNTS", + "AFTER_DISCOUNTS_AND_COMMITMENTS" ] }, "WorkloadEstimateStatus":{ @@ -4004,6 +4007,12 @@ "type":"list", "member":{"shape":"WorkloadEstimateUsageItem"} }, + "WorkloadEstimateUsageMaxResults":{ + "type":"integer", + "box":true, + "max":300, + "min":1 + }, "WorkloadEstimateUsageQuantity":{ "type":"structure", "members":{ diff --git a/services/bedrock/pom.xml b/services/bedrock/pom.xml index b6fc43eba35b..e95bdc24f73e 100644 --- a/services/bedrock/pom.xml +++ b/services/bedrock/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrock AWS Java SDK :: Services :: Bedrock diff --git a/services/bedrock/src/main/resources/codegen-resources/customization.config b/services/bedrock/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..42aa381cf2ea 100644 --- a/services/bedrock/src/main/resources/codegen-resources/customization.config +++ b/services/bedrock/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,4 @@ { "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableEnvironmentBearerToken": true } diff --git a/services/bedrock/src/main/resources/codegen-resources/service-2.json b/services/bedrock/src/main/resources/codegen-resources/service-2.json index b3cd04ee3db8..4282efe6e6af 100644 --- a/services/bedrock/src/main/resources/codegen-resources/service-2.json +++ 
b/services/bedrock/src/main/resources/codegen-resources/service-2.json @@ -2,7 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-04-20", - "auth":["aws.auth#sigv4"], + "auth":[ + "aws.auth#sigv4", + "smithy.api#httpBearerAuth" + ], "endpointPrefix":"bedrock", "protocol":"rest-json", "protocols":["rest-json"], @@ -32,6 +35,27 @@ ], "documentation":"

    Deletes a batch of evaluation jobs. An evaluation job can only be deleted if it has following status FAILED, COMPLETED, and STOPPED. You can request up to 25 model evaluation jobs be deleted in a single request.

    " }, + "CreateCustomModel":{ + "name":"CreateCustomModel", + "http":{ + "method":"POST", + "requestUri":"/custom-models/create-custom-model", + "responseCode":200 + }, + "input":{"shape":"CreateCustomModelRequest"}, + "output":{"shape":"CreateCustomModelResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"TooManyTagsException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Creates a new custom model in Amazon Bedrock. After the model is active, you can use it for inference.

    To use the model for inference, you must purchase Provisioned Throughput for it. You can't use On-demand inference with these custom models. For more information about Provisioned Throughput, see Provisioned Throughput.

    The model appears in ListCustomModels with a customizationType of imported. To track the status of the new model, you use the GetCustomModel API operation. The model can be in the following states:

    • Creating - Initial state during validation and registration

    • Active - Model is ready for use in inference

    • Failed - Creation process encountered an error

    Related APIs

    " + }, "CreateEvaluationJob":{ "name":"CreateEvaluationJob", "http":{ @@ -53,6 +77,25 @@ "documentation":"

    Creates an evaluation job.

    ", "idempotent":true }, + "CreateFoundationModelAgreement":{ + "name":"CreateFoundationModelAgreement", + "http":{ + "method":"POST", + "requestUri":"/create-foundation-model-agreement", + "responseCode":202 + }, + "input":{"shape":"CreateFoundationModelAgreementRequest"}, + "output":{"shape":"CreateFoundationModelAgreementResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Request a model access agreement for the specified model.

    " + }, "CreateGuardrail":{ "name":"CreateGuardrail", "http":{ @@ -282,6 +325,25 @@ "documentation":"

    Deletes a custom model that you created earlier. For more information, see Custom models in the Amazon Bedrock User Guide.

    ", "idempotent":true }, + "DeleteFoundationModelAgreement":{ + "name":"DeleteFoundationModelAgreement", + "http":{ + "method":"POST", + "requestUri":"/delete-foundation-model-agreement", + "responseCode":202 + }, + "input":{"shape":"DeleteFoundationModelAgreementRequest"}, + "output":{"shape":"DeleteFoundationModelAgreementResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Delete the model access agreement for the specified model.

    " + }, "DeleteGuardrail":{ "name":"DeleteGuardrail", "http":{ @@ -452,7 +514,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Get the properties associated with a Amazon Bedrock custom model that you have created.For more information, see Custom models in the Amazon Bedrock User Guide.

    " + "documentation":"

    Get the properties associated with an Amazon Bedrock custom model that you have created. For more information, see Custom models in the Amazon Bedrock User Guide.

    " }, "GetEvaluationJob":{ "name":"GetEvaluationJob", @@ -490,6 +552,24 @@ ], "documentation":"

    Get details about a Amazon Bedrock foundation model.

    " }, + "GetFoundationModelAvailability":{ + "name":"GetFoundationModelAvailability", + "http":{ + "method":"GET", + "requestUri":"/foundation-model-availability/{modelId}", + "responseCode":200 + }, + "input":{"shape":"GetFoundationModelAvailabilityRequest"}, + "output":{"shape":"GetFoundationModelAvailabilityResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get information about the Foundation model availability.

    " + }, "GetGuardrail":{ "name":"GetGuardrail", "http":{ @@ -686,6 +766,23 @@ ], "documentation":"

    Returns details for a Provisioned Throughput. For more information, see Provisioned Throughput in the Amazon Bedrock User Guide.

    " }, + "GetUseCaseForModelAccess":{ + "name":"GetUseCaseForModelAccess", + "http":{ + "method":"GET", + "requestUri":"/use-case-for-model-access", + "responseCode":200 + }, + "input":{"shape":"GetUseCaseForModelAccessRequest"}, + "output":{"shape":"GetUseCaseForModelAccessResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get the use case for model access.

    " + }, "ListCustomModels":{ "name":"ListCustomModels", "http":{ @@ -720,6 +817,24 @@ ], "documentation":"

    Lists all existing evaluation jobs.

    " }, + "ListFoundationModelAgreementOffers":{ + "name":"ListFoundationModelAgreementOffers", + "http":{ + "method":"GET", + "requestUri":"/list-foundation-model-agreement-offers/{modelId}", + "responseCode":200 + }, + "input":{"shape":"ListFoundationModelAgreementOffersRequest"}, + "output":{"shape":"ListFoundationModelAgreementOffersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Get the offers associated with the specified model.

    " + }, "ListFoundationModels":{ "name":"ListFoundationModels", "http":{ @@ -946,6 +1061,23 @@ "documentation":"

    Set the configuration values for model invocation logging.

    ", "idempotent":true }, + "PutUseCaseForModelAccess":{ + "name":"PutUseCaseForModelAccess", + "http":{ + "method":"POST", + "requestUri":"/use-case-for-model-access", + "responseCode":201 + }, + "input":{"shape":"PutUseCaseForModelAccessRequest"}, + "output":{"shape":"PutUseCaseForModelAccessResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Put use case for model access.

    " + }, "RegisterMarketplaceModelEndpoint":{ "name":"RegisterMarketplaceModelEndpoint", "http":{ @@ -1139,6 +1271,11 @@ "type":"string", "pattern":"[0-9]{12}" }, + "AcknowledgementFormDataBody":{ + "type":"blob", + "max":16384, + "min":10 + }, "AdditionalModelRequestFields":{ "type":"map", "key":{"shape":"AdditionalModelRequestFieldsKey"}, @@ -1155,6 +1292,30 @@ }, "document":true }, + "AgreementAvailability":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AgreementStatus", + "documentation":"

    Status of the agreement.

    " + }, + "errorMessage":{ + "shape":"String", + "documentation":"

    Error message.

    " + } + }, + "documentation":"

    Information about the agreement availability.

    " + }, + "AgreementStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "PENDING", + "NOT_AVAILABLE", + "ERROR" + ] + }, "ApplicationType":{ "type":"string", "enum":[ @@ -1167,6 +1328,22 @@ "max":2048, "min":0 }, + "AttributeType":{ + "type":"string", + "enum":[ + "STRING", + "NUMBER", + "BOOLEAN", + "STRING_LIST" + ] + }, + "AuthorizationStatus":{ + "type":"string", + "enum":[ + "AUTHORIZED", + "NOT_AUTHORIZED" + ] + }, "AutomatedEvaluationConfig":{ "type":"structure", "required":["datasetMetricConfigs"], @@ -1333,6 +1510,12 @@ "min":0, "pattern":"[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12}|)" }, + "BedrockRerankingModelArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/(.*))?" + }, "Boolean":{ "type":"boolean", "box":true @@ -1422,6 +1605,50 @@ "type":"string", "pattern":".*[a-z]{1,20}/.{1,20}.*" }, + "CreateCustomModelRequest":{ + "type":"structure", + "required":[ + "modelName", + "modelSourceConfig" + ], + "members":{ + "modelName":{ + "shape":"CustomModelName", + "documentation":"

    A unique name for the custom model.

    " + }, + "modelSourceConfig":{ + "shape":"ModelDataSource", + "documentation":"

    The data source for the model. The Amazon S3 URI in the model source must be for the Amazon-managed Amazon S3 bucket containing your model artifacts.

    " + }, + "modelKmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the customer managed KMS key to encrypt the custom model. If you don't provide a KMS key, Amazon Bedrock uses an Amazon Web Services-managed KMS key to encrypt the model.

    If you provide a customer managed KMS key, your Amazon Bedrock service role must have permissions to use it. For more information see Encryption of imported models.

    " + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock assumes to perform tasks on your behalf. This role must have permissions to access the Amazon S3 bucket containing your model artifacts and the KMS key (if specified). For more information, see Setting up an IAM service role for importing models in the Amazon Bedrock User Guide.

    " + }, + "modelTags":{ + "shape":"TagList", + "documentation":"

    A list of key-value pairs to associate with the custom model resource. You can use these tags to organize and identify your resources.

    For more information, see Tagging resources in the Amazon Bedrock User Guide.

    " + }, + "clientRequestToken":{ + "shape":"IdempotencyToken", + "documentation":"

    A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

    ", + "idempotencyToken":true + } + } + }, + "CreateCustomModelResponse":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "modelArn":{ + "shape":"ModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the new custom model.

    " + } + } + }, "CreateEvaluationJobRequest":{ "type":"structure", "required":[ @@ -1485,6 +1712,33 @@ } } }, + "CreateFoundationModelAgreementRequest":{ + "type":"structure", + "required":[ + "offerToken", + "modelId" + ], + "members":{ + "offerToken":{ + "shape":"OfferToken", + "documentation":"

    An offer token encapsulates the information for an offer.

    " + }, + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    Model Id of the model for the access request.

    " + } + } + }, + "CreateFoundationModelAgreementResponse":{ + "type":"structure", + "required":["modelId"], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    Model Id of the model for the access request.

    " + } + } + }, "CreateGuardrailRequest":{ "type":"structure", "required":[ @@ -1521,6 +1775,10 @@ "shape":"GuardrailContextualGroundingPolicyConfig", "documentation":"

    The contextual grounding policy configuration used to create a guardrail.

    " }, + "crossRegionConfig":{ + "shape":"GuardrailCrossRegionConfig", + "documentation":"

    The system-defined guardrail profile that you're using with your guardrail. Guardrail profiles define the destination Amazon Web Services Regions where guardrail inference requests can be automatically routed.

    For more information, see the Amazon Bedrock User Guide.

    " + }, "blockedInputMessaging":{ "shape":"GuardrailBlockedMessaging", "documentation":"

    The message to return when the guardrail blocks a prompt.

    " @@ -2092,7 +2350,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}/[a-z0-9]{12}" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/(imported|[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2})/[a-z0-9]{12}" }, "CustomModelName":{ "type":"string", @@ -2137,6 +2395,10 @@ "ownerAccountId":{ "shape":"AccountId", "documentation":"

    The unique identifier of the account that owns the model.

    " + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The current status of the custom model. Possible values include:

    • Creating - The model is being created and validated.

    • Active - The model has been successfully created and is ready for use.

    • Failed - The model creation process failed.

    " } }, "documentation":"

    Summary information for a custom model.

    " @@ -2179,7 +2441,8 @@ "enum":[ "FINE_TUNING", "CONTINUED_PRE_TRAINING", - "DISTILLATION" + "DISTILLATION", + "IMPORTED" ] }, "DataProcessingDetails":{ @@ -2217,6 +2480,21 @@ "members":{ } }, + "DeleteFoundationModelAgreementRequest":{ + "type":"structure", + "required":["modelId"], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    Model Id of the model access to delete.

    " + } + } + }, + "DeleteFoundationModelAgreementResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteGuardrailRequest":{ "type":"structure", "required":["guardrailIdentifier"], @@ -2352,6 +2630,28 @@ "members":{ } }, + "DimensionalPriceRate":{ + "type":"structure", + "members":{ + "dimension":{ + "shape":"String", + "documentation":"

    Dimension for the price rate.

    " + }, + "price":{ + "shape":"String", + "documentation":"

    Single-dimensional rate information.

    " + }, + "description":{ + "shape":"String", + "documentation":"

    Description of the price rate.

    " + }, + "unit":{ + "shape":"String", + "documentation":"

    Unit associated with the price.

    " + } + }, + "documentation":"

    Dimensional price rate.

    " + }, "DistillationConfig":{ "type":"structure", "required":["teacherModelConfig"], @@ -2379,6 +2679,13 @@ "max":30, "min":1 }, + "EntitlementAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "NOT_AVAILABLE" + ] + }, "ErrorMessage":{ "type":"string", "max":2048, @@ -2939,6 +3246,29 @@ }, "documentation":"

    The configuration of the external source wrapper object in the retrieveAndGenerate function.

    " }, + "FieldForReranking":{ + "type":"structure", + "required":["fieldName"], + "members":{ + "fieldName":{ + "shape":"FieldForRerankingFieldNameString", + "documentation":"

    The name of the metadata field to be used during the reranking process.

    " + } + }, + "documentation":"

    Specifies a field to be used during the reranking process in a Knowledge Base vector search. This structure identifies metadata fields that should be considered when reordering search results to improve relevance.

    " + }, + "FieldForRerankingFieldNameString":{ + "type":"string", + "max":2000, + "min":1 + }, + "FieldsForReranking":{ + "type":"list", + "member":{"shape":"FieldForReranking"}, + "max":100, + "min":1, + "sensitive":true + }, "FilterAttribute":{ "type":"structure", "required":[ @@ -3147,10 +3477,6 @@ "required":[ "modelArn", "modelName", - "jobArn", - "baseModelArn", - "trainingDataConfig", - "outputDataConfig", "creationTime" ], "members":{ @@ -3168,7 +3494,7 @@ }, "jobArn":{ "shape":"ModelCustomizationJobArn", - "documentation":"

    Job Amazon Resource Name (ARN) associated with this model.

    " + "documentation":"

    Job Amazon Resource Name (ARN) associated with this model. For models that you create with the CreateCustomModel API operation, this is NULL.

    " }, "baseModelArn":{ "shape":"ModelArn", @@ -3213,6 +3539,14 @@ "customizationConfig":{ "shape":"CustomizationConfig", "documentation":"

    The customization configuration for the custom model.

    " + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The current status of the custom model. Possible values include:

    • Creating - The model is being created and validated.

    • Active - The model has been successfully created and is ready for use.

    • Failed - The model creation process failed. Check the failureMessage field for details.

    " + }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

    A failure message for any issues that occurred when creating the custom model. This is included for only a failed CreateCustomModel operation.

    " } } }, @@ -3300,6 +3634,50 @@ } } }, + "GetFoundationModelAvailabilityRequest":{ + "type":"structure", + "required":["modelId"], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    The model Id of the foundation model.

    ", + "location":"uri", + "locationName":"modelId" + } + } + }, + "GetFoundationModelAvailabilityResponse":{ + "type":"structure", + "required":[ + "modelId", + "agreementAvailability", + "authorizationStatus", + "entitlementAvailability", + "regionAvailability" + ], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    The model Id of the foundation model.

    " + }, + "agreementAvailability":{ + "shape":"AgreementAvailability", + "documentation":"

    Agreement availability.

    " + }, + "authorizationStatus":{ + "shape":"AuthorizationStatus", + "documentation":"

    Authorization status.

    " + }, + "entitlementAvailability":{ + "shape":"EntitlementAvailability", + "documentation":"

    Entitlement availability.

    " + }, + "regionAvailability":{ + "shape":"RegionAvailability", + "documentation":"

    Region availability.

    " + } + } + }, "GetFoundationModelRequest":{ "type":"structure", "required":["modelIdentifier"], @@ -3397,6 +3775,10 @@ "shape":"GuardrailContextualGroundingPolicy", "documentation":"

    The contextual grounding policy used in the guardrail.

    " }, + "crossRegionDetails":{ + "shape":"GuardrailCrossRegionDetails", + "documentation":"

    Details about the system-defined guardrail profile that you're using with your guardrail, including the guardrail profile ID and Amazon Resource Name (ARN).

    " + }, "createdAt":{ "shape":"Timestamp", "documentation":"

    The date and time at which the guardrail was created.

    " @@ -3689,14 +4071,14 @@ "shape":"ModelCustomizationJobStatus", "documentation":"

    The status of the job. A successful job transitions from in-progress to completed when the output model is ready to use. If the job failed, the failure message contains information about why the job failed.

    " }, - "failureMessage":{ - "shape":"ErrorMessage", - "documentation":"

    Information about why the job failed.

    " - }, "statusDetails":{ "shape":"StatusDetails", "documentation":"

    For a Distillation job, the details about the statuses of the sub-tasks of the customization job.

    " }, + "failureMessage":{ + "shape":"ErrorMessage", + "documentation":"

    Information about why the job failed.

    " + }, "creationTime":{ "shape":"Timestamp", "documentation":"

    Time that the resource was created.

    " @@ -4070,6 +4452,21 @@ } } }, + "GetUseCaseForModelAccessRequest":{ + "type":"structure", + "members":{ + } + }, + "GetUseCaseForModelAccessResponse":{ + "type":"structure", + "required":["formData"], + "members":{ + "formData":{ + "shape":"AcknowledgementFormDataBody", + "documentation":"

    Get customer profile response.

    " + } + } + }, "GuardrailArn":{ "type":"string", "max":2048, @@ -4237,12 +4634,46 @@ "max":6, "min":1 }, + "GuardrailContentFiltersTier":{ + "type":"structure", + "required":["tierName"], + "members":{ + "tierName":{ + "shape":"GuardrailContentFiltersTierName", + "documentation":"

    The tier that your guardrail uses for content filters. Valid values include:

    • CLASSIC tier – Provides established guardrails functionality supporting English, French, and Spanish languages.

    • STANDARD tier – Provides a more robust solution than the CLASSIC tier and has more comprehensive language support. This tier requires that your guardrail use cross-Region inference.

    " + } + }, + "documentation":"

    The tier that your guardrail uses for content filters.

    " + }, + "GuardrailContentFiltersTierConfig":{ + "type":"structure", + "required":["tierName"], + "members":{ + "tierName":{ + "shape":"GuardrailContentFiltersTierName", + "documentation":"

    The tier that your guardrail uses for content filters. Valid values include:

    • CLASSIC tier – Provides established guardrails functionality supporting English, French, and Spanish languages.

    • STANDARD tier – Provides a more robust solution than the CLASSIC tier and has more comprehensive language support. This tier requires that your guardrail use cross-Region inference.

    " + } + }, + "documentation":"

    The tier that your guardrail uses for content filters. Consider using a tier that balances performance, accuracy, and compatibility with your existing generative AI workflows.

    " + }, + "GuardrailContentFiltersTierName":{ + "type":"string", + "enum":[ + "CLASSIC", + "STANDARD" + ], + "sensitive":true + }, "GuardrailContentPolicy":{ "type":"structure", "members":{ "filters":{ "shape":"GuardrailContentFilters", "documentation":"

    Contains the type of the content filter and how strongly it should apply to prompts and model responses.

    " + }, + "tier":{ + "shape":"GuardrailContentFiltersTier", + "documentation":"

    The tier that your guardrail uses for content filters.

    " } }, "documentation":"

    Contains details about how to handle harmful content.

    This data type is used in the following API operations:

    " @@ -4254,6 +4685,10 @@ "filtersConfig":{ "shape":"GuardrailContentFiltersConfig", "documentation":"

    Contains the type of the content filter and how strongly it should apply to prompts and model responses.

    " + }, + "tierConfig":{ + "shape":"GuardrailContentFiltersTierConfig", + "documentation":"

    The tier that your guardrail uses for content filters.

    " } }, "documentation":"

    Contains details about how to handle harmful content.

    " @@ -4367,6 +4802,49 @@ }, "documentation":"

    The policy configuration details for the guardrails contextual grounding policy.

    " }, + "GuardrailCrossRegionConfig":{ + "type":"structure", + "required":["guardrailProfileIdentifier"], + "members":{ + "guardrailProfileIdentifier":{ + "shape":"GuardrailCrossRegionGuardrailProfileIdentifier", + "documentation":"

    The ID or Amazon Resource Name (ARN) of the guardrail profile that your guardrail is using. Guardrail profile availability depends on your current Amazon Web Services Region. For more information, see the Amazon Bedrock User Guide.

    " + } + }, + "documentation":"

    The system-defined guardrail profile that you're using with your guardrail. Guardrail profiles define the destination Amazon Web Services Regions where guardrail inference requests can be automatically routed. Using guardrail profiles helps maintain guardrail performance and reliability when demand increases.

    For more information, see the Amazon Bedrock User Guide.

    " + }, + "GuardrailCrossRegionDetails":{ + "type":"structure", + "members":{ + "guardrailProfileId":{ + "shape":"GuardrailCrossRegionGuardrailProfileId", + "documentation":"

    The ID of the guardrail profile that your guardrail is using. Profile availability depends on your current Amazon Web Services Region. For more information, see the Amazon Bedrock User Guide.

    " + }, + "guardrailProfileArn":{ + "shape":"GuardrailCrossRegionGuardrailProfileArn", + "documentation":"

    The Amazon Resource Name (ARN) of the guardrail profile that you're using with your guardrail.

    " + } + }, + "documentation":"

    Contains details about the system-defined guardrail profile that you're using with your guardrail for cross-Region inference.

    For more information, see the Amazon Bedrock User Guide.

    " + }, + "GuardrailCrossRegionGuardrailProfileArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail-profile/[a-z0-9-]+[.]{1}guardrail[.]{1}v[0-9:]+" + }, + "GuardrailCrossRegionGuardrailProfileId":{ + "type":"string", + "max":30, + "min":15, + "pattern":"[a-z0-9-]+[.]{1}guardrail[.]{1}v[0-9:]+" + }, + "GuardrailCrossRegionGuardrailProfileIdentifier":{ + "type":"string", + "max":2048, + "min":15, + "pattern":"[a-z0-9-]+[.]{1}guardrail[.]{1}v[0-9:]+|arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail-profile/[a-z0-9-]+[.]{1}guardrail[.]{1}v[0-9:]+" + }, "GuardrailDescription":{ "type":"string", "max":200, @@ -4851,6 +5329,10 @@ "updatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time at which the guardrail was last updated.

    " + }, + "crossRegionDetails":{ + "shape":"GuardrailCrossRegionDetails", + "documentation":"

    Details about the system-defined guardrail profile that you're using with your guardrail, including the guardrail profile ID and Amazon Resource Name (ARN).

    " } }, "documentation":"

    Contains details about a guardrail.

    This data type is used in the following API operations:

    " @@ -4980,6 +5462,10 @@ "topics":{ "shape":"GuardrailTopics", "documentation":"

    A list of policies related to topics that the guardrail should deny.

    " + }, + "tier":{ + "shape":"GuardrailTopicsTier", + "documentation":"

    The tier that your guardrail uses for denied topic filters.

    " } }, "documentation":"

    Contains details about topics that the guardrail should identify and deny.

    This data type is used in the following API operations:

    " @@ -4991,6 +5477,10 @@ "topicsConfig":{ "shape":"GuardrailTopicsConfig", "documentation":"

    A list of policies related to topics that the guardrail should deny.

    " + }, + "tierConfig":{ + "shape":"GuardrailTopicsTierConfig", + "documentation":"

    The tier that your guardrail uses for denied topic filters.

    " } }, "documentation":"

    Contains details about topics that the guardrail should identify and deny.

    " @@ -5011,6 +5501,36 @@ "max":30, "min":1 }, + "GuardrailTopicsTier":{ + "type":"structure", + "required":["tierName"], + "members":{ + "tierName":{ + "shape":"GuardrailTopicsTierName", + "documentation":"

    The tier that your guardrail uses for denied topic filters. Valid values include:

    • CLASSIC tier – Provides established guardrails functionality supporting English, French, and Spanish languages.

    • STANDARD tier – Provides a more robust solution than the CLASSIC tier and has more comprehensive language support. This tier requires that your guardrail use cross-Region inference.

    " + } + }, + "documentation":"

    The tier that your guardrail uses for denied topic filters.

    " + }, + "GuardrailTopicsTierConfig":{ + "type":"structure", + "required":["tierName"], + "members":{ + "tierName":{ + "shape":"GuardrailTopicsTierName", + "documentation":"

    The tier that your guardrail uses for denied topic filters. Valid values include:

    • CLASSIC tier – Provides established guardrails functionality supporting English, French, and Spanish languages.

    • STANDARD tier – Provides a more robust solution than the CLASSIC tier and has more comprehensive language support. This tier requires that your guardrail use cross-Region inference.

    " + } + }, + "documentation":"

    The tier that your guardrail uses for denied topic filters. Consider using a tier that balances performance, accuracy, and compatibility with your existing generative AI workflows.

    " + }, + "GuardrailTopicsTierName":{ + "type":"string", + "enum":[ + "CLASSIC", + "STANDARD" + ], + "sensitive":true + }, "GuardrailVersion":{ "type":"string", "pattern":"(([1-9][0-9]{0,7})|(DRAFT))" @@ -5208,6 +5728,24 @@ "min":1, "sensitive":true }, + "ImplicitFilterConfiguration":{ + "type":"structure", + "required":[ + "metadataAttributes", + "modelArn" + ], + "members":{ + "metadataAttributes":{ + "shape":"MetadataAttributeSchemaList", + "documentation":"

    A list of metadata attribute schemas that define the structure and properties of metadata fields used for implicit filtering. Each attribute defines a key, type, and optional description.

    " + }, + "modelArn":{ + "shape":"BedrockModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the foundation model used for implicit filtering. This model processes the query to extract relevant filtering criteria.

    " + } + }, + "documentation":"

    Configuration for implicit filtering in Knowledge Base vector searches. Implicit filtering allows you to automatically filter search results based on metadata attributes without requiring explicit filter expressions in each query.

    " + }, "ImportedModelArn":{ "type":"string", "max":1011, @@ -5579,6 +6117,14 @@ "filter":{ "shape":"RetrievalFilter", "documentation":"

    Specifies the filters to use on the metadata fields in the knowledge base data sources before returning results.

    " + }, + "implicitFilterConfiguration":{ + "shape":"ImplicitFilterConfiguration", + "documentation":"

    Configuration for implicit filtering in Knowledge Base vector searches. This allows the system to automatically apply filters based on the query context without requiring explicit filter expressions.

    " + }, + "rerankingConfiguration":{ + "shape":"VectorSearchRerankingConfiguration", + "documentation":"

    Configuration for reranking search results in Knowledge Base vector searches. Reranking improves search relevance by reordering initial vector search results using more sophisticated relevance models.

    " } }, "documentation":"

    The configuration details for returning the results from the knowledge base vector search.

    " @@ -5589,6 +6135,16 @@ "max":100, "min":1 }, + "LegalTerm":{ + "type":"structure", + "members":{ + "url":{ + "shape":"String", + "documentation":"

    URL to the legal term document.

    " + } + }, + "documentation":"

    The legal term of the agreement.

    " + }, "ListCustomModelsRequest":{ "type":"structure", "members":{ @@ -5651,6 +6207,12 @@ "documentation":"

    Return custom models depending on if the current account owns them (true) or if they were shared with the current account (false).

    ", "location":"querystring", "locationName":"isOwned" + }, + "modelStatus":{ + "shape":"ModelStatus", + "documentation":"

    The status of the model to filter results by. Possible values include:

    • Creating - Include only models that are currently being created and validated.

    • Active - Include only models that have been successfully created and are ready for use.

    • Failed - Include only models where the creation process failed.

    If you don't specify a status, the API returns models in all states.

    ", + "location":"querystring", + "locationName":"modelStatus" } } }, @@ -5739,6 +6301,41 @@ } } }, + "ListFoundationModelAgreementOffersRequest":{ + "type":"structure", + "required":["modelId"], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    Model Id of the foundation model.

    ", + "location":"uri", + "locationName":"modelId" + }, + "offerType":{ + "shape":"OfferType", + "documentation":"

    Type of offer associated with the model.

    ", + "location":"querystring", + "locationName":"offerType" + } + } + }, + "ListFoundationModelAgreementOffersResponse":{ + "type":"structure", + "required":[ + "modelId", + "offers" + ], + "members":{ + "modelId":{ + "shape":"BedrockModelId", + "documentation":"

    Model Id of the foundation model.

    " + }, + "offers":{ + "shape":"Offers", + "documentation":"

    List of the offers associated with the specified model.

    " + } + } + }, "ListFoundationModelsRequest":{ "type":"structure", "members":{ @@ -6494,6 +7091,63 @@ "min":0, "sensitive":true }, + "MetadataAttributeSchema":{ + "type":"structure", + "required":[ + "key", + "type", + "description" + ], + "members":{ + "key":{ + "shape":"MetadataAttributeSchemaKeyString", + "documentation":"

    The unique identifier for the metadata attribute. This key is used to reference the attribute in filter expressions and reranking configurations.

    " + }, + "type":{ + "shape":"AttributeType", + "documentation":"

    The data type of the metadata attribute. The type determines how the attribute can be used in filter expressions and reranking.

    " + }, + "description":{ + "shape":"MetadataAttributeSchemaDescriptionString", + "documentation":"

    An optional description of the metadata attribute that provides additional context about its purpose and usage.

    " + } + }, + "documentation":"

    Defines the schema for a metadata attribute used in Knowledge Base vector searches. Metadata attributes provide additional context for documents and can be used for filtering and reranking search results.

    ", + "sensitive":true + }, + "MetadataAttributeSchemaDescriptionString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[\\s\\S]+" + }, + "MetadataAttributeSchemaKeyString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\s\\S]+" + }, + "MetadataAttributeSchemaList":{ + "type":"list", + "member":{"shape":"MetadataAttributeSchema"}, + "max":25, + "min":1 + }, + "MetadataConfigurationForReranking":{ + "type":"structure", + "required":["selectionMode"], + "members":{ + "selectionMode":{ + "shape":"RerankingMetadataSelectionMode", + "documentation":"

    The mode for selecting which metadata fields to include in the reranking process. Valid values are ALL (use all available metadata fields) or SELECTIVE (use only specified fields).

    " + }, + "selectiveModeConfiguration":{ + "shape":"RerankingMetadataSelectiveModeConfiguration", + "documentation":"

    Configuration for selective mode, which allows you to explicitly include or exclude specific metadata fields during reranking. This is only used when selectionMode is set to SELECTIVE.

    " + } + }, + "documentation":"

    Configuration for how metadata should be used during the reranking process in Knowledge Base vector searches. This determines which metadata fields are included or excluded when reordering search results.

    " + }, "MetricFloat":{ "type":"float", "box":true @@ -6510,7 +7164,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/((imported)|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}))" }, "ModelCopyJobArn":{ "type":"string", @@ -6653,14 +7307,14 @@ "shape":"ModelCustomizationJobStatus", "documentation":"

    Status of the customization job.

    " }, - "lastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

    Time that the customization job was last modified.

    " - }, "statusDetails":{ "shape":"StatusDetails", "documentation":"

    Details about the status of the data processing sub-task of the job.

    " }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

    Time that the customization job was last modified.

    " + }, "creationTime":{ "shape":"Timestamp", "documentation":"

    Creation time of the custom model.

    " @@ -6693,10 +7347,10 @@ "members":{ "s3DataSource":{ "shape":"S3DataSource", - "documentation":"

    The Amazon S3 data source of the imported model.

    " + "documentation":"

    The Amazon S3 data source of the model to import.

    " } }, - "documentation":"

    Data source for the imported model.

    ", + "documentation":"

    The data source of the model to import.

    ", "union":true }, "ModelId":{ @@ -6709,7 +7363,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)" + "pattern":"arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/((imported)|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)" }, "ModelImportJobArn":{ "type":"string", @@ -6987,10 +7641,53 @@ "min":0, "pattern":".*arn:aws:sagemaker:.*:hub-content/SageMakerPublicHub/Model/.*" }, + "ModelStatus":{ + "type":"string", + "enum":[ + "Active", + "Creating", + "Failed" + ] + }, "NonBlankString":{ "type":"string", "pattern":"[\\s\\S]*" }, + "Offer":{ + "type":"structure", + "required":[ + "offerToken", + "termDetails" + ], + "members":{ + "offerId":{ + "shape":"OfferId", + "documentation":"

    Offer Id for a model offer.

    " + }, + "offerToken":{ + "shape":"OfferToken", + "documentation":"

    Offer token.

    " + }, + "termDetails":{ + "shape":"TermDetails", + "documentation":"

    Details about the terms of the offer.

    " + } + }, + "documentation":"

    An offer dictates usage terms for the model.

    " + }, + "OfferId":{"type":"string"}, + "OfferToken":{"type":"string"}, + "OfferType":{ + "type":"string", + "enum":[ + "ALL", + "PUBLIC" + ] + }, + "Offers":{ + "type":"list", + "member":{"shape":"Offer"} + }, "OrchestrationConfiguration":{ "type":"structure", "required":["queryTransformationConfiguration"], @@ -7041,6 +7738,17 @@ "box":true, "min":1 }, + "PricingTerm":{ + "type":"structure", + "required":["rateCard"], + "members":{ + "rateCard":{ + "shape":"RateCard", + "documentation":"

    Describes a usage price for each dimension.

    " + } + }, + "documentation":"

    Describes the usage-based pricing term.

    " + }, "PromptRouterArn":{ "type":"string", "max":2048, @@ -7273,6 +7981,21 @@ "members":{ } }, + "PutUseCaseForModelAccessRequest":{ + "type":"structure", + "required":["formData"], + "members":{ + "formData":{ + "shape":"AcknowledgementFormDataBody", + "documentation":"

    The form data of the customer profile request.

    " + } + } + }, + "PutUseCaseForModelAccessResponse":{ + "type":"structure", + "members":{ + } + }, "QueryTransformationConfiguration":{ "type":"structure", "required":["type"], @@ -7320,6 +8043,10 @@ "max":1, "min":1 }, + "RateCard":{ + "type":"list", + "member":{"shape":"DimensionalPriceRate"} + }, "RatingScale":{ "type":"list", "member":{"shape":"RatingScaleItem"}, @@ -7369,6 +8096,13 @@ "max":100, "min":1 }, + "RegionAvailability":{ + "type":"string", + "enum":[ + "AVAILABLE", + "NOT_AVAILABLE" + ] + }, "RegisterMarketplaceModelEndpointRequest":{ "type":"structure", "required":[ @@ -7461,6 +8195,28 @@ "min":0, "pattern":"[a-zA-Z0-9\\s._:/=+$@-]{0,256}" }, + "RerankingMetadataSelectionMode":{ + "type":"string", + "enum":[ + "SELECTIVE", + "ALL" + ] + }, + "RerankingMetadataSelectiveModeConfiguration":{ + "type":"structure", + "members":{ + "fieldsToInclude":{ + "shape":"FieldsForReranking", + "documentation":"

    A list of metadata field names to explicitly include in the reranking process. Only these fields will be considered when reordering search results. This parameter cannot be used together with fieldsToExclude.

    " + }, + "fieldsToExclude":{ + "shape":"FieldsForReranking", + "documentation":"

    A list of metadata field names to explicitly exclude from the reranking process. All metadata fields except these will be considered when reordering search results. This parameter cannot be used together with fieldsToInclude.

    " + } + }, + "documentation":"

    Configuration for selectively including or excluding metadata fields during the reranking process. This allows you to control which metadata attributes are considered when reordering search results.

    ", + "union":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -7630,7 +8386,7 @@ "documentation":"

    The URI of the Amazon S3 data source.

    " } }, - "documentation":"

    The Amazon S3 data source of the imported job.

    " + "documentation":"

    The Amazon S3 data source of the model to import.

    " }, "S3InputFormat":{ "type":"string", @@ -7839,6 +8595,16 @@ "max":16, "min":1 }, + "SupportTerm":{ + "type":"structure", + "members":{ + "refundPolicyDescription":{ + "shape":"String", + "documentation":"

    Describes the refund policy.

    " + } + }, + "documentation":"

    Describes a support term.

    " + }, "Tag":{ "type":"structure", "required":[ @@ -7907,7 +8673,7 @@ "type":"string", "max":1011, "min":20, - "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job|custom-model)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:automated-reasoning-policy/[a-zA-Z0-9]+(:[a-zA-Z0-9]+)?$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model|async-invoke|provisioned-model-v2|provisioned-model-reservation|prompt-router)/[a-z0-9]{12}$))).*" + "pattern":".*(^[a-zA-Z0-9][a-zA-Z0-9\\-]*$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:custom-model/([a-z0-9-]{1,63}[.][a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?|imported)/[a-z0-9]{12}$)|(^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:([0-9]{12}|)((:(fine-tuning-job|model-customization-job)/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([a-z0-9-]{1,63}[.]){0,2}[a-z0-9-]{1,63}([:][a-z0-9-]{1,63}){0,2}(/[a-z0-9]{12})$)|(:guardrail/[a-z0-9]+$)|(:automated-reasoning-policy/[a-zA-Z0-9]+(:[a-zA-Z0-9]+)?$)|(:(inference-profile|application-inference-profile)/[a-zA-Z0-9-:.]+$)|(:(provisioned-model|model-invocation-job|model-evaluation-job|evaluation-job|model-import-job|imported-model|async-invoke|provisioned-model-v2|provisioned-model-reservation|prompt-router|custom-model-deployment)/[a-z0-9]{12}$))).*" }, "TeacherModelConfig":{ "type":"structure", @@ -7934,6 +8700,30 @@ "max":1, "min":0 }, + "TermDetails":{ + "type":"structure", + "required":[ + "usageBasedPricingTerm", + "legalTerm", + "supportTerm" + ], + "members":{ + "usageBasedPricingTerm":{"shape":"PricingTerm"}, + "legalTerm":{ + "shape":"LegalTerm", + "documentation":"

    Describes the legal terms.

    " + }, + "supportTerm":{ + "shape":"SupportTerm", + "documentation":"

    Describes the support terms.

    " + }, + "validityTerm":{ + "shape":"ValidityTerm", + "documentation":"

    Describes the validity terms.

    " + } + }, + "documentation":"

    Describes the usage terms of an offer.

    " + }, "TextInferenceConfig":{ "type":"structure", "members":{ @@ -8107,6 +8897,10 @@ "shape":"GuardrailContextualGroundingPolicyConfig", "documentation":"

    The contextual grounding policy configuration used to update a guardrail.

    " }, + "crossRegionConfig":{ + "shape":"GuardrailCrossRegionConfig", + "documentation":"

    The system-defined guardrail profile that you're using with your guardrail. Guardrail profiles define the destination Amazon Web Services Regions where guardrail inference requests can be automatically routed.

    For more information, see the Amazon Bedrock User Guide.

    " + }, "blockedInputMessaging":{ "shape":"GuardrailBlockedMessaging", "documentation":"

    The message to return when the guardrail blocks a prompt.

    " @@ -8280,6 +9074,75 @@ "max":10, "min":0 }, + "ValidityTerm":{ + "type":"structure", + "members":{ + "agreementDuration":{ + "shape":"String", + "documentation":"

    Describes the agreement duration.

    " + } + }, + "documentation":"

    Describes the validity terms.

    " + }, + "VectorSearchBedrockRerankingConfiguration":{ + "type":"structure", + "required":["modelConfiguration"], + "members":{ + "modelConfiguration":{ + "shape":"VectorSearchBedrockRerankingModelConfiguration", + "documentation":"

    Configuration for the Amazon Bedrock foundation model used for reranking. This includes the model ARN and any additional request fields required by the model.

    " + }, + "numberOfRerankedResults":{ + "shape":"VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger", + "documentation":"

    The maximum number of results to rerank. This limits how many of the initial vector search results will be processed by the reranking model. A smaller number improves performance but may exclude potentially relevant results.

    " + }, + "metadataConfiguration":{ + "shape":"MetadataConfigurationForReranking", + "documentation":"

    Configuration for how document metadata should be used during the reranking process. This determines which metadata fields are included when reordering search results.

    " + } + }, + "documentation":"

    Configuration for using Amazon Bedrock foundation models to rerank Knowledge Base vector search results. This enables more sophisticated relevance ranking using large language models.

    " + }, + "VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "VectorSearchBedrockRerankingModelConfiguration":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "modelArn":{ + "shape":"BedrockRerankingModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the foundation model to use for reranking. This model processes the query and search results to determine a more relevant ordering.

    " + }, + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"

    A list of additional fields to include in the model request during reranking. These fields provide extra context or configuration options specific to the selected foundation model.

    " + } + }, + "documentation":"

    Configuration for the Amazon Bedrock foundation model used for reranking vector search results. This specifies which model to use and any additional parameters required by the model.

    " + }, + "VectorSearchRerankingConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"VectorSearchRerankingConfigurationType", + "documentation":"

    The type of reranking to apply to vector search results. Currently, the only supported value is BEDROCK, which uses Amazon Bedrock foundation models for reranking.

    " + }, + "bedrockRerankingConfiguration":{ + "shape":"VectorSearchBedrockRerankingConfiguration", + "documentation":"

    Configuration for using Amazon Bedrock foundation models to rerank search results. This is required when the reranking type is set to BEDROCK.

    " + } + }, + "documentation":"

    Configuration for reranking vector search results to improve relevance. Reranking applies additional relevance models to reorder the initial vector search results based on more sophisticated criteria.

    " + }, + "VectorSearchRerankingConfigurationType":{ + "type":"string", + "enum":["BEDROCK_RERANKING_MODEL"] + }, "VpcConfig":{ "type":"structure", "required":[ diff --git a/services/bedrockagent/pom.xml b/services/bedrockagent/pom.xml index fcf011e38311..bf4c859fe183 100644 --- a/services/bedrockagent/pom.xml +++ b/services/bedrockagent/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrockagent AWS Java SDK :: Services :: Bedrock Agent diff --git a/services/bedrockagent/src/main/resources/codegen-resources/customization.config b/services/bedrockagent/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/bedrockagent/src/main/resources/codegen-resources/customization.config +++ b/services/bedrockagent/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/bedrockagent/src/main/resources/codegen-resources/service-2.json b/services/bedrockagent/src/main/resources/codegen-resources/service-2.json index 0095b7ba4d8e..087457238c07 100644 --- a/services/bedrockagent/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockagent/src/main/resources/codegen-resources/service-2.json @@ -1519,6 +1519,22 @@ }, "documentation":"

    Contains details about an action group.

    " }, + "AdditionalModelRequestFields":{ + "type":"map", + "key":{"shape":"AdditionalModelRequestFieldsKey"}, + "value":{"shape":"AdditionalModelRequestFieldsValue"} + }, + "AdditionalModelRequestFieldsKey":{ + "type":"string", + "max":100, + "min":1 + }, + "AdditionalModelRequestFieldsValue":{ + "type":"structure", + "members":{ + }, + "document":true + }, "Agent":{ "type":"structure", "required":[ @@ -1736,6 +1752,10 @@ "shape":"Id", "documentation":"

    The unique identifier of the agent.

    " }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. If the agent alias is running, the value is ACCEPT_INVOCATIONS. If the agent alias is paused, the value is REJECT_INVOCATIONS. Use the UpdateAgentAlias operation to change the invocation state.

    " + }, "clientToken":{ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

    " @@ -1858,6 +1878,10 @@ "shape":"AgentAliasStatus", "documentation":"

    The status of the alias.

    " }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. If the agent alias is running, the value is ACCEPT_INVOCATIONS. If the agent alias is paused, the value is REJECT_INVOCATIONS. Use the UpdateAgentAlias operation to change the invocation state.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the alias of the agent was created.

    " @@ -2025,7 +2049,7 @@ "documentation":"

    The Amazon Resource Name (ARN) of the alias of the agent to invoke.

    " } }, - "documentation":"

    Defines an agent node in your flow. You specify the agent to invoke at this point in the flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Defines an agent node in your flow. You specify the agent to invoke at this point in the flow. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "AgentKnowledgeBase":{ "type":"structure", @@ -2308,6 +2332,14 @@ }, "documentation":"

    Contains details about a version of an agent.

    " }, + "AliasInvocationState":{ + "type":"string", + "documentation":"

    Enum representing the invocation state of an agent alias.

    ", + "enum":[ + "ACCEPT_INVOCATIONS", + "REJECT_INVOCATIONS" + ] + }, "AnyToolChoice":{ "type":"structure", "members":{ @@ -2511,6 +2543,12 @@ "min":1, "pattern":"^(arn:aws(-[^:]{1,12})?:(bedrock):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-zA-Z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" }, + "BedrockRerankingModelArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/(.*))?$" + }, "Boolean":{ "type":"boolean", "box":true @@ -2636,7 +2674,7 @@ "type":"structure", "members":{ }, - "documentation":"

    Defines a collector node in your flow. This node takes an iteration of inputs and consolidates them into an array in the output. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Defines a collector node in your flow. This node takes an iteration of inputs and consolidates them into an array in the output. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "ColumnName":{ "type":"string", @@ -2644,6 +2682,13 @@ "min":0, "pattern":"^[a-zA-Z0-9_\\-]+$" }, + "ConcurrencyType":{ + "type":"string", + "enum":[ + "Automatic", + "Manual" + ] + }, "ConditionFlowNodeConfiguration":{ "type":"structure", "required":["conditions"], @@ -2653,7 +2698,7 @@ "documentation":"

    An array of conditions. Each member contains the name of a condition and an expression that defines the condition.

    " } }, - "documentation":"

    Defines a condition node in your flow. You can specify conditions that determine which node comes next in the flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Defines a condition node in your flow. You can specify conditions that determine which node comes next in the flow. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "ConflictException":{ "type":"structure", @@ -3063,6 +3108,10 @@ "documentation":"

    A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

    ", "idempotencyToken":true }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed in parallel.

    " + }, "description":{ "shape":"Description", "documentation":"

    A description for the alias.

    " @@ -3103,6 +3152,10 @@ "shape":"FlowAliasArn", "documentation":"

    The Amazon Resource Name (ARN) of the alias.

    " }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed in parallel.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the alias was created.

    " @@ -4521,12 +4574,35 @@ "max":2048, "min":0 }, + "FieldForReranking":{ + "type":"structure", + "required":["fieldName"], + "members":{ + "fieldName":{ + "shape":"FieldForRerankingFieldNameString", + "documentation":"

    The name of the metadata field to include or exclude during reranking.

    " + } + }, + "documentation":"

    Specifies a metadata field to include or exclude during the reranking process.

    " + }, + "FieldForRerankingFieldNameString":{ + "type":"string", + "max":2000, + "min":1 + }, "FieldName":{ "type":"string", "max":2048, "min":0, "pattern":"^.*$" }, + "FieldsForReranking":{ + "type":"list", + "member":{"shape":"FieldForReranking"}, + "max":100, + "min":1, + "sensitive":true + }, "FilterList":{ "type":"list", "member":{"shape":"FilterPattern"}, @@ -4579,6 +4655,27 @@ "type":"string", "pattern":"^arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/(TSTALIASID|[0-9a-zA-Z]{10})$" }, + "FlowAliasConcurrencyConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "maxConcurrency":{ + "shape":"FlowAliasConcurrencyConfigurationMaxConcurrencyInteger", + "documentation":"

    The maximum number of nodes that can be executed concurrently in the flow.

    " + }, + "type":{ + "shape":"ConcurrencyType", + "documentation":"

    The type of concurrency to use for parallel node execution. Specify one of the following options:

    • Automatic - Amazon Bedrock determines which nodes can be executed in parallel based on the flow definition and its dependencies.

    • Manual - You specify which nodes can be executed in parallel.

    " + } + }, + "documentation":"

    Determines how multiple nodes in a flow can run in parallel. Running nodes concurrently can improve your flow's performance.

    " + }, + "FlowAliasConcurrencyConfigurationMaxConcurrencyInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "FlowAliasId":{ "type":"string", "pattern":"^(TSTALIASID|[0-9a-zA-Z]{10})$" @@ -4625,6 +4722,10 @@ "shape":"FlowAliasArn", "documentation":"

    The Amazon Resource Name (ARN) of the alias.

    " }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed concurrently.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the alias was created.

    " @@ -4891,6 +4992,18 @@ "shape":"LexFlowNodeConfiguration", "documentation":"

    Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.

    " }, + "loop":{ + "shape":"LoopFlowNodeConfiguration", + "documentation":"

    Contains configurations for a DoWhile loop in your flow.

    " + }, + "loopController":{ + "shape":"LoopControllerFlowNodeConfiguration", + "documentation":"

    Contains controller node configurations for a DoWhile loop in your flow.

    " + }, + "loopInput":{ + "shape":"LoopInputFlowNodeConfiguration", + "documentation":"

    Contains input node configurations for a DoWhile loop in your flow.

    " + }, "output":{ "shape":"OutputFlowNodeConfiguration", "documentation":"

    Contains configurations for an output flow node in your flow. This is the last node in the flow. The outputs field can't be specified for this node.

    " @@ -4908,7 +5021,7 @@ "documentation":"

    Contains configurations for a storage node in your flow. Stores an input in an Amazon S3 location.

    " } }, - "documentation":"

    Contains configurations for a node in your flow. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    ", + "documentation":"

    Contains configurations for a node in your flow. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    ", "union":true }, "FlowNodeIODataType":{ @@ -4929,20 +5042,32 @@ "type" ], "members":{ + "category":{ + "shape":"FlowNodeInputCategory", + "documentation":"

    Specifies how input data flows between iterations in a DoWhile loop.

    • LoopCondition - Controls whether the loop continues by evaluating condition expressions against the input data. Use this category to define the condition that determines if the loop should continue.

    • ReturnValueToLoopStart - Defines data to pass back to the start of the loop's next iteration. Use this category for variables that you want to update for each loop iteration.

    • ExitLoop - Defines the value that's available once the loop ends. Use this category to expose loop results to nodes outside the loop.

    " + }, "expression":{ "shape":"FlowNodeInputExpression", "documentation":"

    An expression that formats the input for the node. For an explanation of how to create expressions, see Expressions in Prompt flows in Amazon Bedrock.

    " }, "name":{ "shape":"FlowNodeInputName", - "documentation":"

    A name for the input that you can reference.

    " + "documentation":"

    Specifies a name for the input that you can reference.

    " }, "type":{ "shape":"FlowNodeIODataType", - "documentation":"

    The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown.

    " + "documentation":"

    Specifies the data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown.

    " } }, - "documentation":"

    Contains configurations for an input to a node.

    " + "documentation":"

    Contains configurations for an input in an Amazon Bedrock Flows node.

    " + }, + "FlowNodeInputCategory":{ + "type":"string", + "enum":[ + "LoopCondition", + "ReturnValueToLoopStart", + "ExitLoop" + ] }, "FlowNodeInputExpression":{ "type":"string", @@ -5007,7 +5132,10 @@ "Retrieval", "Iterator", "Collector", - "InlineCode" + "InlineCode", + "Loop", + "LoopInput", + "LoopController" ] }, "FlowNodes":{ @@ -5123,6 +5251,14 @@ "shape":"IncompatibleConnectionDataTypeFlowValidationDetails", "documentation":"

    Details about incompatible data types in a connection.

    " }, + "invalidLoopBoundary":{ + "shape":"InvalidLoopBoundaryFlowValidationDetails", + "documentation":"

    Details about a flow that includes connections that violate loop boundary rules.

    " + }, + "loopIncompatibleNodeType":{ + "shape":"LoopIncompatibleNodeTypeFlowValidationDetails", + "documentation":"

    Details about a flow that includes incompatible node types in a DoWhile loop.

    " + }, "malformedConditionExpression":{ "shape":"MalformedConditionExpressionFlowValidationDetails", "documentation":"

    Details about a malformed condition expression in a node.

    " @@ -5151,6 +5287,14 @@ "shape":"MissingEndingNodesFlowValidationDetails", "documentation":"

    Details about missing ending nodes in the flow.

    " }, + "missingLoopControllerNode":{ + "shape":"MissingLoopControllerNodeFlowValidationDetails", + "documentation":"

    Details about a flow that's missing a required LoopController node in a DoWhile loop.

    " + }, + "missingLoopInputNode":{ + "shape":"MissingLoopInputNodeFlowValidationDetails", + "documentation":"

    Details about a flow that's missing a required LoopInput node in a DoWhile loop.

    " + }, "missingNodeConfiguration":{ "shape":"MissingNodeConfigurationFlowValidationDetails", "documentation":"

    Details about missing configuration for a node.

    " @@ -5167,6 +5311,14 @@ "shape":"MissingStartingNodesFlowValidationDetails", "documentation":"

    Details about missing starting nodes in the flow.

    " }, + "multipleLoopControllerNodes":{ + "shape":"MultipleLoopControllerNodesFlowValidationDetails", + "documentation":"

    Details about a flow that contains multiple LoopController nodes in a DoWhile loop.

    " + }, + "multipleLoopInputNodes":{ + "shape":"MultipleLoopInputNodesFlowValidationDetails", + "documentation":"

    Details about a flow that contains multiple LoopInput nodes in a DoWhile loop.

    " + }, "multipleNodeInputConnections":{ "shape":"MultipleNodeInputConnectionsFlowValidationDetails", "documentation":"

    Details about multiple connections to a single node input.

    " @@ -5255,7 +5407,13 @@ "UnsatisfiedConnectionConditions", "Unspecified", "UnknownNodeInput", - "UnknownNodeOutput" + "UnknownNodeOutput", + "MissingLoopInputNode", + "MissingLoopControllerNode", + "MultipleLoopInputNodes", + "MultipleLoopControllerNodes", + "LoopIncompatibleNodeType", + "InvalidLoopBoundary" ] }, "FlowValidations":{ @@ -5612,6 +5770,10 @@ "shape":"FlowAliasArn", "documentation":"

    The Amazon Resource Name (ARN) of the flow.

    " }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed in parallel.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the flow was created.

    " @@ -6062,6 +6224,15 @@ }, "documentation":"

    Details about incompatible data types in a connection between nodes.

    " }, + "IncompatibleLoopNodeType":{ + "type":"string", + "enum":[ + "Input", + "Condition", + "Iterator", + "Collector" + ] + }, "InferenceConfiguration":{ "type":"structure", "members":{ @@ -6430,6 +6601,29 @@ "exception":true, "fault":true }, + "InvalidLoopBoundaryFlowValidationDetails":{ + "type":"structure", + "required":[ + "connection", + "source", + "target" + ], + "members":{ + "connection":{ + "shape":"FlowConnectionName", + "documentation":"

    The name of the connection that violates loop boundary rules.

    " + }, + "source":{ + "shape":"FlowNodeName", + "documentation":"

    The source node of the connection that violates DoWhile loop boundary rules.

    " + }, + "target":{ + "shape":"FlowNodeName", + "documentation":"

    The target node of the connection that violates DoWhile loop boundary rules.

    " + } + }, + "documentation":"

    Details about a flow that contains connections that violate loop boundary rules.

    " + }, "IteratorFlowNodeConfiguration":{ "type":"structure", "members":{ @@ -6621,6 +6815,10 @@ "shape":"GuardrailConfiguration", "documentation":"

    Contains configurations for a guardrail to apply during query and response generation for the knowledge base in this configuration.

    " }, + "inferenceConfiguration":{ + "shape":"PromptInferenceConfiguration", + "documentation":"

    Contains inference configurations for the prompt.

    " + }, "knowledgeBaseId":{ "shape":"KnowledgeBaseId", "documentation":"

    The unique identifier of the knowledge base to query.

    " @@ -6628,9 +6826,31 @@ "modelId":{ "shape":"KnowledgeBaseModelIdentifier", "documentation":"

    The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

    " + }, + "numberOfResults":{ + "shape":"KnowledgeBaseFlowNodeConfigurationNumberOfResultsInteger", + "documentation":"

    The number of results to retrieve from the knowledge base.

    " + }, + "orchestrationConfiguration":{ + "shape":"KnowledgeBaseOrchestrationConfiguration", + "documentation":"

    The configuration for orchestrating the retrieval and generation process in the knowledge base node.

    " + }, + "promptTemplate":{ + "shape":"KnowledgeBasePromptTemplate", + "documentation":"

    A custom prompt template to use with the knowledge base for generating responses.

    " + }, + "rerankingConfiguration":{ + "shape":"VectorSearchRerankingConfiguration", + "documentation":"

    The configuration for reranking the retrieved results from the knowledge base to improve relevance.

    " } }, - "documentation":"

    Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Contains configurations for a knowledge base node in a flow. This node takes a query as the input and returns, as the output, the retrieved responses directly (as an array) or a response generated based on the retrieved responses. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " + }, + "KnowledgeBaseFlowNodeConfigurationNumberOfResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 }, "KnowledgeBaseId":{ "type":"string", @@ -6644,6 +6864,38 @@ "min":1, "pattern":"^(arn:aws(-[^:]{1,12})?:(bedrock|sagemaker):[a-z0-9-]{1,20}:([0-9]{12})?:([a-z-]+/)?)?([a-zA-Z0-9.-]{1,63}){0,2}(([:][a-z0-9-]{1,63}){0,2})?(/[a-z0-9]{1,12})?$" }, + "KnowledgeBaseOrchestrationConfiguration":{ + "type":"structure", + "members":{ + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"

    The additional model-specific request parameters as key-value pairs to be included in the request to the foundation model.

    " + }, + "inferenceConfig":{ + "shape":"PromptInferenceConfiguration", + "documentation":"

    Contains inference configurations for the prompt.

    " + }, + "performanceConfig":{ + "shape":"PerformanceConfiguration", + "documentation":"

    The performance configuration options for the knowledge base retrieval and generation process.

    " + }, + "promptTemplate":{ + "shape":"KnowledgeBasePromptTemplate", + "documentation":"

    A custom prompt template for orchestrating the retrieval and generation process.

    " + } + }, + "documentation":"

    Configures how the knowledge base orchestrates the retrieval and generation process, allowing for customization of prompts, inference parameters, and performance settings.

    " + }, + "KnowledgeBasePromptTemplate":{ + "type":"structure", + "members":{ + "textPromptTemplate":{ + "shape":"KnowledgeBaseTextPrompt", + "documentation":"

    The text of the prompt template.

    " + } + }, + "documentation":"

    Defines a custom prompt template for orchestrating the retrieval and generation process.

    " + }, "KnowledgeBaseRoleArn":{ "type":"string", "max":2048, @@ -6716,6 +6968,12 @@ }, "documentation":"

    Contains details about a knowledge base.

    " }, + "KnowledgeBaseTextPrompt":{ + "type":"string", + "max":100000, + "min":1, + "sensitive":true + }, "KnowledgeBaseType":{ "type":"string", "enum":[ @@ -6739,7 +6997,7 @@ "documentation":"

    The Amazon Resource Name (ARN) of the Lambda function to invoke.

    " } }, - "documentation":"

    Contains configurations for a Lambda function node in the flow. You specify the Lambda function to invoke and the inputs into the function. The output is the response that is defined in the Lambda function. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Contains configurations for a Lambda function node in the flow. You specify the Lambda function to invoke and the inputs into the function. The output is the response that is defined in the Lambda function. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "LexBotAliasArn":{ "type":"string", @@ -6768,7 +7026,7 @@ "documentation":"

    The Region to invoke the Amazon Lex bot in.

    " } }, - "documentation":"

    Contains configurations for a Lex node in the flow. You specify a Amazon Lex bot to invoke. This node takes an utterance as the input and returns as the output the intent identified by the Amazon Lex bot. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Contains configurations for a Lex node in the flow. You specify an Amazon Lex bot to invoke. This node takes an utterance as the input and returns as the output the intent identified by the Amazon Lex bot. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "ListAgentActionGroupsRequest":{ "type":"structure", @@ -7314,6 +7572,67 @@ } } }, + "LoopControllerFlowNodeConfiguration":{ + "type":"structure", + "required":["continueCondition"], + "members":{ + "continueCondition":{ + "shape":"FlowCondition", + "documentation":"

    Specifies the condition that determines when the flow exits the DoWhile loop. The loop executes until this condition evaluates to true.

    " + }, + "maxIterations":{ + "shape":"LoopControllerFlowNodeConfigurationMaxIterationsInteger", + "documentation":"

    Specifies the maximum number of times the DoWhile loop can iterate before the flow exits the loop.

    " + } + }, + "documentation":"

    Contains configurations for the controller node of a DoWhile loop in the flow.

    " + }, + "LoopControllerFlowNodeConfigurationMaxIterationsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "LoopFlowNodeConfiguration":{ + "type":"structure", + "required":["definition"], + "members":{ + "definition":{ + "shape":"FlowDefinition", + "documentation":"

    The definition of the DoWhile loop nodes and connections between nodes in the flow.

    " + } + }, + "documentation":"

    Contains configurations for the nodes of a DoWhile loop in your flow.

    A DoWhile loop is made up of the following nodes:

    • Loop - The container node that holds the loop's flow definition. This node encompasses the entire loop structure.

    • LoopInput - The entry point node for the loop. This node receives inputs from nodes outside the loop and from previous loop iterations.

    • Body nodes - The processing nodes that execute within each loop iteration. These can be nodes for handling data in your flow, such as a prompt or Lambda function nodes. Some node types aren't supported inside a DoWhile loop body. For more information, see LoopIncompatibleNodeTypeFlowValidationDetails.

    • LoopController - The node that evaluates whether the loop should continue or exit based on a condition.

    These nodes work together to create a loop that runs at least once and continues until a specified condition is met or a maximum number of iterations is reached.

    " + }, + "LoopIncompatibleNodeTypeFlowValidationDetails":{ + "type":"structure", + "required":[ + "incompatibleNodeName", + "incompatibleNodeType", + "node" + ], + "members":{ + "incompatibleNodeName":{ + "shape":"FlowNodeName", + "documentation":"

    The node that's incompatible in the DoWhile loop.

    " + }, + "incompatibleNodeType":{ + "shape":"IncompatibleLoopNodeType", + "documentation":"

    The node type of the incompatible node in the DoWhile loop. Some node types, like a condition node, aren't allowed in a DoWhile loop.

    " + }, + "node":{ + "shape":"FlowNodeName", + "documentation":"

    The Loop container node that contains an incompatible node.

    " + } + }, + "documentation":"

    Details about a flow that contains an incompatible node in a DoWhile loop.

    " + }, + "LoopInputFlowNodeConfiguration":{ + "type":"structure", + "members":{ + }, + "documentation":"

    Contains configurations for the input node of a DoWhile loop in the flow.

    " + }, "MalformedConditionExpressionFlowValidationDetails":{ "type":"structure", "required":[ @@ -7472,6 +7791,21 @@ "max":10, "min":1 }, + "MetadataConfigurationForReranking":{ + "type":"structure", + "required":["selectionMode"], + "members":{ + "selectionMode":{ + "shape":"RerankingMetadataSelectionMode", + "documentation":"

    The mode for selecting metadata fields for reranking.

    " + }, + "selectiveModeConfiguration":{ + "shape":"RerankingMetadataSelectiveModeConfiguration", + "documentation":"

    The configuration for selective metadata field inclusion or exclusion during reranking.

    " + } + }, + "documentation":"

    Specifies how metadata fields should be handled during the reranking process.

    " + }, "MetadataSourceType":{ "type":"string", "enum":[ @@ -7568,16 +7902,38 @@ }, "documentation":"

    Details about missing ending nodes (such as FlowOutputNode) in the flow.

    " }, + "MissingLoopControllerNodeFlowValidationDetails":{ + "type":"structure", + "required":["loopNode"], + "members":{ + "loopNode":{ + "shape":"FlowNodeName", + "documentation":"

    The DoWhile loop in a flow that's missing a required LoopController node.

    " + } + }, + "documentation":"

    Details about a flow that's missing a required LoopController node in a DoWhile loop.

    " + }, + "MissingLoopInputNodeFlowValidationDetails":{ + "type":"structure", + "required":["loopNode"], + "members":{ + "loopNode":{ + "shape":"FlowNodeName", + "documentation":"

    The DoWhile loop in a flow that's missing a required LoopInput node.

    " + } + }, + "documentation":"

    Details about a flow that's missing a required LoopInput node in a DoWhile loop.

    " + }, "MissingNodeConfigurationFlowValidationDetails":{ "type":"structure", "required":["node"], "members":{ "node":{ "shape":"FlowNodeName", - "documentation":"

    The name of the node missing configuration.

    " + "documentation":"

    The name of the node missing a required configuration.

    " } }, - "documentation":"

    Details about a node missing required configuration.

    " + "documentation":"

    Details about a node missing a required configuration.

    " }, "MissingNodeInputFlowValidationDetails":{ "type":"structure", @@ -7726,6 +8082,28 @@ "min":0, "pattern":"^.*$" }, + "MultipleLoopControllerNodesFlowValidationDetails":{ + "type":"structure", + "required":["loopNode"], + "members":{ + "loopNode":{ + "shape":"FlowNodeName", + "documentation":"

    The DoWhile loop in a flow that contains multiple LoopController nodes.

    " + } + }, + "documentation":"

    Details about a flow that contains multiple LoopController nodes in a DoWhile loop.

    " + }, + "MultipleLoopInputNodesFlowValidationDetails":{ + "type":"structure", + "required":["loopNode"], + "members":{ + "loopNode":{ + "shape":"FlowNodeName", + "documentation":"

    The DoWhile loop in a flow that contains multiple LoopInput nodes.

    " + } + }, + "documentation":"

    Details about a flow that contains multiple LoopInput nodes in a DoWhile loop.

    " + }, "MultipleNodeInputConnectionsFlowValidationDetails":{ "type":"structure", "required":[ @@ -8080,6 +8458,23 @@ "type":"string", "sensitive":true }, + "PerformanceConfigLatency":{ + "type":"string", + "enum":[ + "standard", + "optimized" + ] + }, + "PerformanceConfiguration":{ + "type":"structure", + "members":{ + "latency":{ + "shape":"PerformanceConfigLatency", + "documentation":"

    The latency optimization setting.

    " + } + }, + "documentation":"

    The performance-related configuration options for the knowledge base retrieval and generation process.

    " + }, "PineconeConfiguration":{ "type":"structure", "required":[ @@ -8284,7 +8679,7 @@ "documentation":"

    Specifies whether the prompt is from Prompt management or defined inline.

    " } }, - "documentation":"

    Contains configurations for a prompt node in the flow. You can use a prompt from Prompt management or you can define one in this node. If the prompt contains variables, the inputs into this node will fill in the variables. The output from this node is the response generated by the model. For more information, see Node types in Amazon Bedrock works in the Amazon Bedrock User Guide.

    " + "documentation":"

    Contains configurations for a prompt node in the flow. You can use a prompt from Prompt management or you can define one in this node. If the prompt contains variables, the inputs into this node will fill in the variables. The output from this node is the response generated by the model. For more information, see Node types in a flow in the Amazon Bedrock User Guide.

    " }, "PromptFlowNodeInlineConfiguration":{ "type":"structure", @@ -9097,6 +9492,28 @@ "DISABLED" ] }, + "RerankingMetadataSelectionMode":{ + "type":"string", + "enum":[ + "SELECTIVE", + "ALL" + ] + }, + "RerankingMetadataSelectiveModeConfiguration":{ + "type":"structure", + "members":{ + "fieldsToExclude":{ + "shape":"FieldsForReranking", + "documentation":"

    Specifies the metadata fields to exclude from the reranking process.

    " + }, + "fieldsToInclude":{ + "shape":"FieldsForReranking", + "documentation":"

    Specifies the metadata fields to include in the reranking process.

    " + } + }, + "documentation":"

    Configures the metadata fields to include or exclude during the reranking process when using selective mode.

    ", + "union":true + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -10272,6 +10689,10 @@ "location":"uri", "locationName":"agentId" }, + "aliasInvocationState":{ + "shape":"AliasInvocationState", + "documentation":"

    The invocation state for the agent alias. To pause the agent alias, set the value to REJECT_INVOCATIONS. To start the agent alias running again, set the value to ACCEPT_INVOCATIONS. Use the GetAgentAlias, or ListAgentAliases, operation to get the invocation state of an agent alias.

    " + }, "description":{ "shape":"Description", "documentation":"

    Specifies a new description for the alias.

    " @@ -10546,6 +10967,10 @@ "location":"uri", "locationName":"aliasIdentifier" }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed in parallel.

    " + }, "description":{ "shape":"Description", "documentation":"

    A description for the alias.

    " @@ -10582,6 +11007,10 @@ "shape":"FlowAliasArn", "documentation":"

    The Amazon Resource Name (ARN) of the flow.

    " }, + "concurrencyConfiguration":{ + "shape":"FlowAliasConcurrencyConfiguration", + "documentation":"

    The configuration that specifies how nodes in the flow are executed in parallel.

    " + }, "createdAt":{ "shape":"DateTimestamp", "documentation":"

    The time at which the flow was created.

    " @@ -10967,6 +11396,65 @@ }, "documentation":"

    Contains details about the model used to create vector embeddings for the knowledge base.

    " }, + "VectorSearchBedrockRerankingConfiguration":{ + "type":"structure", + "required":["modelConfiguration"], + "members":{ + "metadataConfiguration":{ + "shape":"MetadataConfigurationForReranking", + "documentation":"

    Specifies how metadata fields should be handled during the reranking process.

    " + }, + "modelConfiguration":{ + "shape":"VectorSearchBedrockRerankingModelConfiguration", + "documentation":"

    Specifies the configuration for the Amazon Bedrock reranker model.

    " + }, + "numberOfRerankedResults":{ + "shape":"VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger", + "documentation":"

    Specifies the number of results to return after reranking.

    " + } + }, + "documentation":"

    Configures the Amazon Bedrock reranker model to improve the relevance of retrieved results.

    " + }, + "VectorSearchBedrockRerankingConfigurationNumberOfRerankedResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "VectorSearchBedrockRerankingModelConfiguration":{ + "type":"structure", + "required":["modelArn"], + "members":{ + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"

    Specifies additional model-specific request parameters as key-value pairs that are included in the request to the Amazon Bedrock reranker model.

    " + }, + "modelArn":{ + "shape":"BedrockRerankingModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the Amazon Bedrock reranker model.

    " + } + }, + "documentation":"

    Configures the Amazon Bedrock model used for reranking retrieved results.

    " + }, + "VectorSearchRerankingConfiguration":{ + "type":"structure", + "required":["type"], + "members":{ + "bedrockRerankingConfiguration":{ + "shape":"VectorSearchBedrockRerankingConfiguration", + "documentation":"

    Specifies the configuration for using an Amazon Bedrock reranker model to rerank retrieved results.

    " + }, + "type":{ + "shape":"VectorSearchRerankingConfigurationType", + "documentation":"

    Specifies the type of reranking model to use. Currently, the only supported value is BEDROCK_RERANKING_MODEL.

    " + } + }, + "documentation":"

    Specifies how retrieved results from a knowledge base are reranked to improve relevance.

    " + }, + "VectorSearchRerankingConfigurationType":{ + "type":"string", + "enum":["BEDROCK_RERANKING_MODEL"] + }, "Version":{ "type":"string", "max":5, diff --git a/services/bedrockagentruntime/pom.xml b/services/bedrockagentruntime/pom.xml index 177e86aeb39d..0a5264aa552f 100644 --- a/services/bedrockagentruntime/pom.xml +++ b/services/bedrockagentruntime/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrockagentruntime AWS Java SDK :: Services :: Bedrock Agent Runtime diff --git a/services/bedrockagentruntime/src/main/resources/codegen-resources/customization.config b/services/bedrockagentruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/customization.config +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json b/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json index 028bbbe316f0..5b2fd1d0a2fd 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,18 @@ "limit_key": "maxItems", "result_key": "memoryContents" }, + "ListFlowExecutionEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowExecutionEvents" + }, + "ListFlowExecutions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowExecutionSummaries" + }, "ListInvocationSteps": { "input_token": "nextToken", "output_token": "nextToken", diff --git 
a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json index dea1859c1119..e56955988f23 100644 --- a/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockagentruntime/src/main/resources/codegen-resources/service-2.json @@ -162,6 +162,42 @@ ], "documentation":"

    Gets the sessions stored in the memory of the agent.

    " }, + "GetExecutionFlowSnapshot":{ + "name":"GetExecutionFlowSnapshot", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}/executions/{executionIdentifier}/flowsnapshot", + "responseCode":200 + }, + "input":{"shape":"GetExecutionFlowSnapshotRequest"}, + "output":{"shape":"GetExecutionFlowSnapshotResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves the flow definition snapshot used for a flow execution. The snapshot represents the flow metadata and definition as it existed at the time the execution was started. Note that even if the flow is edited after an execution starts, the snapshot connected to the execution remains unchanged.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, + "GetFlowExecution":{ + "name":"GetFlowExecution", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}/executions/{executionIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetFlowExecutionRequest"}, + "output":{"shape":"GetFlowExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Retrieves details about a specific flow execution, including its status, start and end times, and any errors that occurred during execution.

    " + }, "GetInvocationStep":{ "name":"GetInvocationStep", "http":{ @@ -265,6 +301,42 @@ ], "documentation":"

    Invokes an inline Amazon Bedrock agent using the configurations you provide with the request.

    • Specify the following fields for security purposes.

      • (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent.

      • (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time expires, the subsequent InvokeInlineAgent request begins a new session.

    • To override the default prompt behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts.

    • The agent instructions will not be honored if your agent has only one knowledge base, uses default prompts, has no action group, and user input is disabled.

    " }, + "ListFlowExecutionEvents":{ + "name":"ListFlowExecutionEvents", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}/executions/{executionIdentifier}/events", + "responseCode":200 + }, + "input":{"shape":"ListFlowExecutionEventsRequest"}, + "output":{"shape":"ListFlowExecutionEventsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists events that occurred during a flow execution. Events provide detailed information about the execution progress, including node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, + "ListFlowExecutions":{ + "name":"ListFlowExecutions", + "http":{ + "method":"GET", + "requestUri":"/flows/{flowIdentifier}/executions", + "responseCode":200 + }, + "input":{"shape":"ListFlowExecutionsRequest"}, + "output":{"shape":"ListFlowExecutionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Lists all executions of a flow. Results can be paginated and include summary information about each execution, such as status, start and end times, and the execution's Amazon Resource Name (ARN).

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, "ListInvocationSteps":{ "name":"ListInvocationSteps", "http":{ @@ -464,6 +536,49 @@ ], "documentation":"

    Queries a knowledge base and generates responses based on the retrieved results, with output in streaming format.

    The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeModelWithResponseStream.

    This operation requires permission for the bedrock:RetrieveAndGenerate action.

    " }, + "StartFlowExecution":{ + "name":"StartFlowExecution", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}/executions", + "responseCode":200 + }, + "input":{"shape":"StartFlowExecutionRequest"}, + "output":{"shape":"StartFlowExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

    Starts an execution of an Amazon Bedrock flow. Unlike flows that run until completion or time out after five minutes, flow executions let you run flows asynchronously for longer durations. Flow executions also yield control so that your application can perform other tasks.

    This operation returns an Amazon Resource Name (ARN) that you can use to track and manage your flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, + "StopFlowExecution":{ + "name":"StopFlowExecution", + "http":{ + "method":"POST", + "requestUri":"/flows/{flowIdentifier}/aliases/{flowAliasIdentifier}/executions/{executionIdentifier}/stop", + "responseCode":200 + }, + "input":{"shape":"StopFlowExecutionRequest"}, + "output":{"shape":"StopFlowExecutionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"DependencyFailedException"}, + {"shape":"BadGatewayException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Stops an Amazon Bedrock flow's execution. This operation prevents further processing of the flow and changes the execution status to Aborted.

    " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -613,6 +728,10 @@ "ActionGroupInvocationOutput":{ "type":"structure", "members":{ + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the action group output.

    " + }, "text":{ "shape":"ActionGroupOutputString", "documentation":"

    The JSON-formatted string returned by the API invoked by the action group.

    " @@ -783,6 +902,10 @@ "shape":"String", "documentation":"

    The output's agent collaborator name.

    " }, + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the output from the agent collaborator.

    " + }, "output":{ "shape":"AgentCollaboratorOutputPayload", "documentation":"

    The output's output.

    " @@ -1215,6 +1338,10 @@ "files":{ "shape":"Files", "documentation":"

    Contains output files, if generated by code execution.

    " + }, + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the output from the code interpreter.

    " } }, "documentation":"

    Contains the JSON-formatted string returned by the API invoked by the code interpreter.

    " @@ -1313,6 +1440,30 @@ "type":"list", "member":{"shape":"Collaborator"} }, + "ConditionResultEvent":{ + "type":"structure", + "required":[ + "nodeName", + "satisfiedConditions", + "timestamp" + ], + "members":{ + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the condition node that evaluated the conditions.

    " + }, + "satisfiedConditions":{ + "shape":"SatisfiedConditions", + "documentation":"

    A list of conditions that were satisfied during the evaluation.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the condition evaluation occurred.

    " + } + }, + "documentation":"

    Contains information about a condition evaluation result during a flow execution. This event is generated when a condition node in the flow evaluates its conditions.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, "ConfirmationState":{ "type":"string", "enum":[ @@ -1735,10 +1886,18 @@ "FailureTrace":{ "type":"structure", "members":{ + "failureCode":{ + "shape":"Integer", + "documentation":"

    The failure code for the trace.

    " + }, "failureReason":{ "shape":"FailureReasonString", "documentation":"

    The reason the interaction failed.

    " }, + "metadata":{ + "shape":"Metadata", + "documentation":"

    Information about the failure that occurred.

    " + }, "traceId":{ "shape":"TraceId", "documentation":"

    The unique identifier of the trace.

    " @@ -1856,6 +2015,10 @@ "FinalResponse":{ "type":"structure", "members":{ + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the invoke agent operation.

    " + }, "text":{ "shape":"FinalResponseString", "documentation":"

    The text in the response to the user.

    " @@ -1897,12 +2060,254 @@ "INPUT_REQUIRED" ] }, + "FlowErrorCode":{ + "type":"string", + "enum":[ + "VALIDATION", + "INTERNAL_SERVER", + "NODE_EXECUTION_FAILED" + ] + }, + "FlowExecutionContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

    The document content of the field, which can contain text or structured data.

    " + } + }, + "documentation":"

    Contains the content of a flow execution input or output field.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true, + "union":true + }, + "FlowExecutionError":{ + "type":"structure", + "members":{ + "error":{ + "shape":"FlowExecutionErrorType", + "documentation":"

    The error code for the type of error that occurred.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    A descriptive message that provides details about the error.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node in the flow where the error occurred (if applicable).

    " + } + }, + "documentation":"

    Contains information about an error that occurred during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, + "FlowExecutionErrorType":{ + "type":"string", + "enum":["ExecutionTimedOut"] + }, + "FlowExecutionErrors":{ + "type":"list", + "member":{"shape":"FlowExecutionError"} + }, + "FlowExecutionEvent":{ + "type":"structure", + "members":{ + "conditionResultEvent":{ + "shape":"ConditionResultEvent", + "documentation":"

    Contains information about a condition evaluation result during the flow execution. This event is generated when a condition node in the flow evaluates its conditions.

    " + }, + "flowFailureEvent":{ + "shape":"FlowFailureEvent", + "documentation":"

    Contains information about a failure that occurred at the flow level during execution.

    " + }, + "flowInputEvent":{ + "shape":"FlowExecutionInputEvent", + "documentation":"

    Contains information about the inputs provided to the flow at the start of execution.

    " + }, + "flowOutputEvent":{ + "shape":"FlowExecutionOutputEvent", + "documentation":"

    Contains information about the outputs produced by the flow at the end of execution.

    " + }, + "nodeFailureEvent":{ + "shape":"NodeFailureEvent", + "documentation":"

    Contains information about a failure that occurred at a specific node during execution.

    " + }, + "nodeInputEvent":{ + "shape":"NodeInputEvent", + "documentation":"

    Contains information about the inputs provided to a specific node during execution.

    " + }, + "nodeOutputEvent":{ + "shape":"NodeOutputEvent", + "documentation":"

    Contains information about the outputs produced by a specific node during execution.

    " + } + }, + "documentation":"

    Represents an event that occurred during a flow execution. This is a union type that can contain one of several event types, such as node input and output events; flow input and output events; condition node result events, or failure events.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "union":true + }, + "FlowExecutionEventType":{ + "type":"string", + "enum":[ + "Node", + "Flow" + ] + }, + "FlowExecutionEvents":{ + "type":"list", + "member":{"shape":"FlowExecutionEvent"}, + "max":10, + "min":0 + }, "FlowExecutionId":{ "type":"string", "max":100, "min":2, "pattern":"^[0-9a-zA-Z._:-]+$" }, + "FlowExecutionIdentifier":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^[a-zA-Z0-9-]{1,36}$|^(arn:aws:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:flow/[0-9a-zA-Z]{10}/alias/[0-9a-zA-Z]{10}/execution/[a-zA-Z0-9-]{1,36})$" + }, + "FlowExecutionInputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"FlowInputFields", + "documentation":"

    A list of input fields provided to the flow.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that receives the inputs.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the inputs are provided.

    " + } + }, + "documentation":"

    Contains information about the inputs provided to the flow at the start of a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "FlowExecutionName":{ + "type":"string", + "max":36, + "min":0, + "pattern":"^[a-zA-Z0-9-]{1,36}$" + }, + "FlowExecutionOutputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"FlowOutputFields", + "documentation":"

    A list of output fields produced by the flow.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that produces the outputs.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the outputs are produced.

    " + } + }, + "documentation":"

    Contains information about the outputs produced by the flow during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "FlowExecutionRoleArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?.+$" + }, + "FlowExecutionStatus":{ + "type":"string", + "enum":[ + "Running", + "Succeeded", + "Failed", + "TimedOut", + "Aborted" + ] + }, + "FlowExecutionSummaries":{ + "type":"list", + "member":{"shape":"FlowExecutionSummary"}, + "max":10, + "min":0 + }, + "FlowExecutionSummary":{ + "type":"structure", + "required":[ + "createdAt", + "executionArn", + "flowAliasIdentifier", + "flowIdentifier", + "flowVersion", + "status" + ], + "members":{ + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the flow execution was created.

    " + }, + "endedAt":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the flow execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " + }, + "executionArn":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.

    " + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the execution.

    " + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    " + }, + "flowVersion":{ + "shape":"Version", + "documentation":"

    The version of the flow used for the execution.

    " + }, + "status":{ + "shape":"FlowExecutionStatus", + "documentation":"

    The current status of the flow execution.

    Flow executions time out after 24 hours.

    " + } + }, + "documentation":"

    Contains summary information about a flow execution, including its status, timestamps, and identifiers.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    " + }, + "FlowFailureEvent":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "timestamp" + ], + "members":{ + "errorCode":{ + "shape":"FlowErrorCode", + "documentation":"

    The error code that identifies the type of failure that occurred.

    " + }, + "errorMessage":{ + "shape":"String", + "documentation":"

    A descriptive message that provides details about the failure.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the failure occurred.

    " + } + }, + "documentation":"

    Contains information about a failure that occurred at the flow level during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, "FlowIdentifier":{ "type":"string", "max":2048, @@ -1947,6 +2352,31 @@ "sensitive":true, "union":true }, + "FlowInputField":{ + "type":"structure", + "required":[ + "content", + "name" + ], + "members":{ + "content":{ + "shape":"FlowExecutionContent", + "documentation":"

    The content of the input field, which can contain text or structured data.

    " + }, + "name":{ + "shape":"NodeInputName", + "documentation":"

    The name of the input field as defined in the flow's input schema.

    " + } + }, + "documentation":"

    Represents an input field provided to a flow during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "FlowInputFields":{ + "type":"list", + "member":{"shape":"FlowInputField"}, + "max":5, + "min":1 + }, "FlowInputs":{ "type":"list", "member":{"shape":"FlowInput"}, @@ -2025,6 +2455,31 @@ "event":true, "sensitive":true }, + "FlowOutputField":{ + "type":"structure", + "required":[ + "content", + "name" + ], + "members":{ + "content":{ + "shape":"FlowExecutionContent", + "documentation":"

    The content of the output field, which can contain text or structured data.

    " + }, + "name":{ + "shape":"NodeOutputName", + "documentation":"

    The name of the output field as defined in the flow's output schema.

    " + } + }, + "documentation":"

    Represents an output field produced by a flow during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "FlowOutputFields":{ + "type":"list", + "member":{"shape":"FlowOutputField"}, + "max":5, + "min":1 + }, "FlowResponseStream":{ "type":"structure", "members":{ @@ -2594,6 +3049,143 @@ } } }, + "GetExecutionFlowSnapshotRequest":{ + "type":"structure", + "required":[ + "executionIdentifier", + "flowAliasIdentifier", + "flowIdentifier" + ], + "members":{ + "executionIdentifier":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The unique identifier of the flow execution.

    ", + "location":"uri", + "locationName":"executionIdentifier" + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the flow execution.

    ", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    ", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "GetExecutionFlowSnapshotResponse":{ + "type":"structure", + "required":[ + "definition", + "executionRoleArn", + "flowAliasIdentifier", + "flowIdentifier", + "flowVersion" + ], + "members":{ + "customerEncryptionKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The Amazon Resource Name (ARN) of the customer managed KMS key that's used to encrypt the flow snapshot.

    " + }, + "definition":{ + "shape":"String", + "documentation":"

    The flow definition used for the flow execution, including the nodes, connections, and configuration at the time when the execution started.

    The definition returns as a string that follows the structure of a FlowDefinition object.

    " + }, + "executionRoleArn":{ + "shape":"FlowExecutionRoleArn", + "documentation":"

    The Amazon Resource Name (ARN) of the IAM service role that's used by the flow execution.

    " + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the flow execution.

    " + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    " + }, + "flowVersion":{ + "shape":"Version", + "documentation":"

    The version of the flow used for the flow execution.

    " + } + } + }, + "GetFlowExecutionRequest":{ + "type":"structure", + "required":[ + "executionIdentifier", + "flowAliasIdentifier", + "flowIdentifier" + ], + "members":{ + "executionIdentifier":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The unique identifier of the flow execution to retrieve.

    ", + "location":"uri", + "locationName":"executionIdentifier" + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the execution.

    ", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    ", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "GetFlowExecutionResponse":{ + "type":"structure", + "required":[ + "executionArn", + "flowAliasIdentifier", + "flowIdentifier", + "flowVersion", + "startedAt", + "status" + ], + "members":{ + "endedAt":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the flow execution ended. This field is only populated when the execution has completed, failed, timed out, or been aborted.

    " + }, + "errors":{ + "shape":"FlowExecutionErrors", + "documentation":"

    A list of errors that occurred during the flow execution. Each error includes an error code, message, and the node where the error occurred, if applicable.

    " + }, + "executionArn":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.

    " + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the execution.

    " + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    " + }, + "flowVersion":{ + "shape":"Version", + "documentation":"

    The version of the flow used for the execution.

    " + }, + "startedAt":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the flow execution started.

    " + }, + "status":{ + "shape":"FlowExecutionStatus", + "documentation":"

    The current status of the flow execution.

    Flow executions time out after 24 hours.

    " + } + } + }, "GetInvocationStepRequest":{ "type":"structure", "required":[ @@ -3059,6 +3651,10 @@ "shape":"GuardrailAssessmentList", "documentation":"

    The details of the input assessments used in the Guardrail Trace.

    " }, + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the Guardrail output.

    " + }, "outputAssessments":{ "shape":"GuardrailAssessmentList", "documentation":"

    The details of the output assessments used in the Guardrail Trace.

    " @@ -3718,6 +4314,10 @@ "shape":"MemoryId", "documentation":"

    The unique identifier of the agent memory.

    " }, + "promptCreationConfigurations":{ + "shape":"PromptCreationConfigurations", + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.

    " + }, "sessionId":{ "shape":"SessionId", "documentation":"

    The unique identifier of the session. Use the same value across requests to continue the same conversation.

    ", @@ -3908,6 +4508,10 @@ "shape":"OrchestrationType", "documentation":"

    Specifies the type of orchestration strategy for the agent. This is set to DEFAULT orchestration type, by default.

    " }, + "promptCreationConfigurations":{ + "shape":"PromptCreationConfigurations", + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeInlineAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.

    " + }, "promptOverrideConfiguration":{ "shape":"PromptOverrideConfiguration", "documentation":"

    Configurations for advanced prompts used to override the default prompts to enhance the accuracy of the inline agent.

    " @@ -4035,6 +4639,10 @@ "KnowledgeBaseLookupOutput":{ "type":"structure", "members":{ + "metadata":{ + "shape":"Metadata", + "documentation":"

    Contains information about the knowledge base output.

    " + }, "retrievedReferences":{ "shape":"RetrievedReferences", "documentation":"

    Contains metadata about the sources cited for the generated response.

    " @@ -4117,60 +4725,165 @@ "shape":"BedrockModelArn", "documentation":"

    The ARN of the foundation model or inference profile used to generate a response.

    " }, - "orchestrationConfiguration":{ - "shape":"OrchestrationConfiguration", - "documentation":"

    Settings for how the model processes the prompt prior to retrieval and generation.

    " + "orchestrationConfiguration":{ + "shape":"OrchestrationConfiguration", + "documentation":"

    Settings for how the model processes the prompt prior to retrieval and generation.

    " + }, + "retrievalConfiguration":{ + "shape":"KnowledgeBaseRetrievalConfiguration", + "documentation":"

    Contains configurations for how to retrieve and return the knowledge base query.

    " + } + }, + "documentation":"

    Contains details about the resource being queried.

    This data type is used in the following API operations:

    " + }, + "KnowledgeBaseVectorSearchConfiguration":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"RetrievalFilter", + "documentation":"

    Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.

    " + }, + "implicitFilterConfiguration":{ + "shape":"ImplicitFilterConfiguration", + "documentation":"

    Settings for implicit filtering.

    " + }, + "numberOfResults":{ + "shape":"KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger", + "documentation":"

    The number of source chunks to retrieve.

    ", + "box":true + }, + "overrideSearchType":{ + "shape":"SearchType", + "documentation":"

    By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. For other vector store configurations, only SEMANTIC search is available. For more information, see Test a knowledge base.

    " + }, + "rerankingConfiguration":{ + "shape":"VectorSearchRerankingConfiguration", + "documentation":"

    Contains configurations for reranking the retrieved results. For more information, see Improve the relevance of query responses with a reranker model.

    " + } + }, + "documentation":"

    Configurations for how to perform the search query and return results. For more information, see Query configurations.

    This data type is used in the following API operations:

    " + }, + "KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "KnowledgeBases":{ + "type":"list", + "member":{"shape":"KnowledgeBase"} + }, + "LambdaArn":{"type":"string"}, + "LambdaResourceArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" + }, + "ListFlowExecutionEventsRequest":{ + "type":"structure", + "required":[ + "eventType", + "executionIdentifier", + "flowAliasIdentifier", + "flowIdentifier" + ], + "members":{ + "eventType":{ + "shape":"FlowExecutionEventType", + "documentation":"

    The type of events to retrieve. Specify Node for node-level events or Flow for flow-level events.

    ", + "location":"querystring", + "locationName":"eventType" + }, + "executionIdentifier":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The unique identifier of the flow execution.

    ", + "location":"uri", + "locationName":"executionIdentifier" + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the execution.

    ", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    ", + "location":"uri", + "locationName":"flowIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of events to return in a single response. If more events exist than the specified maxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to retrieve the next set of results. This value is returned in the response if more results are available.

    ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListFlowExecutionEventsResponse":{ + "type":"structure", + "required":["flowExecutionEvents"], + "members":{ + "flowExecutionEvents":{ + "shape":"FlowExecutionEvents", + "documentation":"

    A list of events that occurred during the flow execution. Events can include node inputs and outputs, flow inputs and outputs, condition results, and failure events.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to retrieve the next set of results. This value is returned if more results are available.

    " + } + } + }, + "ListFlowExecutionsRequest":{ + "type":"structure", + "required":["flowIdentifier"], + "members":{ + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias to list executions for.

    ", + "location":"querystring", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow to list executions for.

    ", + "location":"uri", + "locationName":"flowIdentifier" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of flow executions to return in a single response. If more executions exist than the specified maxResults value, a token is included in the response so that the remaining results can be retrieved.

    ", + "location":"querystring", + "locationName":"maxResults" }, - "retrievalConfiguration":{ - "shape":"KnowledgeBaseRetrievalConfiguration", - "documentation":"

    Contains configurations for how to retrieve and return the knowledge base query.

    " + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to retrieve the next set of results. This value is returned in the response if more results are available.

    ", + "location":"querystring", + "locationName":"nextToken" } - }, - "documentation":"

    Contains details about the resource being queried.

    This data type is used in the following API operations:

    " + } }, - "KnowledgeBaseVectorSearchConfiguration":{ + "ListFlowExecutionsResponse":{ "type":"structure", + "required":["flowExecutionSummaries"], "members":{ - "filter":{ - "shape":"RetrievalFilter", - "documentation":"

    Specifies the filters to use on the metadata in the knowledge base data sources before returning results. For more information, see Query configurations.

    " - }, - "implicitFilterConfiguration":{ - "shape":"ImplicitFilterConfiguration", - "documentation":"

    Settings for implicit filtering.

    " - }, - "numberOfResults":{ - "shape":"KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger", - "documentation":"

    The number of source chunks to retrieve.

    ", - "box":true - }, - "overrideSearchType":{ - "shape":"SearchType", - "documentation":"

    By default, Amazon Bedrock decides a search strategy for you. If you're using an Amazon OpenSearch Serverless vector store that contains a filterable text field, you can specify whether to query the knowledge base with a HYBRID search using both vector embeddings and raw text, or SEMANTIC search using only vector embeddings. For other vector store configurations, only SEMANTIC search is available. For more information, see Test a knowledge base.

    " + "flowExecutionSummaries":{ + "shape":"FlowExecutionSummaries", + "documentation":"

    A list of flow execution summaries. Each summary includes the execution ARN, flow identifier, flow alias identifier, flow version, status, and timestamps.

    " }, - "rerankingConfiguration":{ - "shape":"VectorSearchRerankingConfiguration", - "documentation":"

    Contains configurations for reranking the retrieved results. For more information, see Improve the relevance of query responses with a reranker model.

    " + "nextToken":{ + "shape":"NextToken", + "documentation":"

    A token to retrieve the next set of results. This value is returned if more results are available.

    " } - }, - "documentation":"

    Configurations for how to perform the search query and return results. For more information, see Query configurations.

    This data type is used in the following API operations:

    " - }, - "KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger":{ - "type":"integer", - "box":true, - "max":100, - "min":1 - }, - "KnowledgeBases":{ - "type":"list", - "member":{"shape":"KnowledgeBase"} - }, - "LambdaArn":{"type":"string"}, - "LambdaResourceArn":{ - "type":"string", - "max":2048, - "min":0, - "pattern":"^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" + } }, "ListInvocationStepsRequest":{ "type":"structure", @@ -4304,6 +5017,10 @@ } } }, + "Long":{ + "type":"long", + "box":true + }, "MaxResults":{ "type":"integer", "documentation":"

    Max Results.

    ", @@ -4399,12 +5116,32 @@ "Metadata":{ "type":"structure", "members":{ + "clientRequestId":{ + "shape":"String", + "documentation":"

    A unique identifier associated with the downstream invocation. This ID can be used for tracing, debugging, and identifying specific invocations in customer logs or systems.

    " + }, + "endTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    In the final response, endTime is the end time of the agent invocation operation.

    " + }, + "operationTotalTimeMs":{ + "shape":"Long", + "documentation":"

    The total time it took for the agent to complete execution. This field is only set for the final response.

    " + }, + "startTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

    In the final response, startTime is the start time of the agent invocation operation.

    " + }, + "totalTimeMs":{ + "shape":"Long", + "documentation":"

    The total execution time for the specific invocation being processed (model, knowledge base, guardrail, agent collaborator, or code interpreter). It represents how long the individual invocation took.

    " + }, "usage":{ "shape":"Usage", - "documentation":"

    Contains details of the foundation model usage.

    " + "documentation":"

    Specific to model invocation and contains details about the usage of a foundation model.

    " } }, - "documentation":"

    Provides details of the foundation model.

    ", + "documentation":"

    Provides information about the execution process for different types of invocations, such as model invocation, knowledge base invocation, agent collaborator invocation, guardrail invocation, and code interpreter invocation.

    ", "sensitive":true }, "MetadataAttributeSchema":{ @@ -4543,6 +5280,105 @@ "min":1, "pattern":"^\\S*$" }, + "NodeErrorCode":{ + "type":"string", + "enum":[ + "VALIDATION", + "DEPENDENCY_FAILED", + "BAD_GATEWAY", + "INTERNAL_SERVER" + ] + }, + "NodeExecutionContent":{ + "type":"structure", + "members":{ + "document":{ + "shape":"Document", + "documentation":"

    The document content of the field, which can contain text or structured data.

    " + } + }, + "documentation":"

    Contains the content of a flow node's input or output field for a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true, + "union":true + }, + "NodeFailureEvent":{ + "type":"structure", + "required":[ + "errorCode", + "errorMessage", + "nodeName", + "timestamp" + ], + "members":{ + "errorCode":{ + "shape":"NodeErrorCode", + "documentation":"

    The error code that identifies the type of failure that occurred at the node.

    " + }, + "errorMessage":{ + "shape":"String", + "documentation":"

    A descriptive message that provides details about the node failure.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node where the failure occurred.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the node failure occurred.

    " + } + }, + "documentation":"

    Contains information about a failure that occurred at a specific node during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "NodeInputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"NodeInputFields", + "documentation":"

    A list of input fields provided to the node.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that received the inputs.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the inputs were provided to the node.

    " + } + }, + "documentation":"

    Contains information about the inputs provided to a specific node during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "NodeInputField":{ + "type":"structure", + "required":[ + "content", + "name" + ], + "members":{ + "content":{ + "shape":"NodeExecutionContent", + "documentation":"

    The content of the input field, which can contain text or structured data.

    " + }, + "name":{ + "shape":"NodeInputName", + "documentation":"

    The name of the input field as defined in the node's input schema.

    " + } + }, + "documentation":"

    Represents an input field provided to a node during a flow execution.

    ", + "sensitive":true + }, + "NodeInputFields":{ + "type":"list", + "member":{"shape":"NodeInputField"}, + "max":5, + "min":1 + }, "NodeInputName":{ "type":"string", "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" @@ -4551,6 +5387,55 @@ "type":"string", "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" }, + "NodeOutputEvent":{ + "type":"structure", + "required":[ + "fields", + "nodeName", + "timestamp" + ], + "members":{ + "fields":{ + "shape":"NodeOutputFields", + "documentation":"

    A list of output fields produced by the node.

    " + }, + "nodeName":{ + "shape":"NodeName", + "documentation":"

    The name of the node that produced the outputs.

    " + }, + "timestamp":{ + "shape":"DateTimestamp", + "documentation":"

    The timestamp when the outputs were produced by the node.

    " + } + }, + "documentation":"

    Contains information about the outputs produced by a specific node during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "NodeOutputField":{ + "type":"structure", + "required":[ + "content", + "name" + ], + "members":{ + "content":{ + "shape":"NodeExecutionContent", + "documentation":"

    The content of the output field, which can contain text or structured data.

    " + }, + "name":{ + "shape":"NodeOutputName", + "documentation":"

    The name of the output field as defined in the node's output schema.

    " + } + }, + "documentation":"

    Represents an output field produced by a node during a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "NodeOutputFields":{ + "type":"list", + "member":{"shape":"NodeOutputField"}, + "max":2, + "min":1 + }, "NodeOutputName":{ "type":"string", "pattern":"^[a-zA-Z]([_]?[0-9a-zA-Z]){0,99}$" @@ -5105,6 +5990,25 @@ "max":10, "min":0 }, + "PromptCreationConfigurations":{ + "type":"structure", + "members":{ + "excludePreviousThinkingSteps":{ + "shape":"Boolean", + "documentation":"

    If true, the service removes any content between <thinking> tags from previous conversations in an agent session. The service will only remove content from already processed turns. This helps you remove content which might not be useful for current and subsequent invocations. This can reduce the input token count and potentially save costs. The default value is false.

    " + }, + "previousConversationTurnsToInclude":{ + "shape":"PromptCreationConfigurationsPreviousConversationTurnsToIncludeInteger", + "documentation":"

    The number of previous conversations from the ongoing agent session to include in the conversation history of the agent prompt, during the current invocation. This gives you more granular control over the context that the model is made aware of, and helps the model remove older context which is no longer useful during the ongoing agent session.

    " + } + }, + "documentation":"

    Specifies parameters that control how the service populates the agent prompt for an InvokeAgent or InvokeInlineAgent request. You can control which aspects of previous invocations in the same agent session the service uses to populate the agent prompt. This gives you more granular control over the contextual history that is used to process the current request.

    " + }, + "PromptCreationConfigurationsPreviousConversationTurnsToIncludeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "PromptOverrideConfiguration":{ "type":"structure", "required":["promptConfigurations"], @@ -6407,6 +7311,24 @@ "min":1, "pattern":"^s3://[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]/.{1,1024}$" }, + "SatisfiedCondition":{ + "type":"structure", + "required":["conditionName"], + "members":{ + "conditionName":{ + "shape":"String", + "documentation":"

    The name of the condition that was satisfied.

    " + } + }, + "documentation":"

    Represents a condition that was satisfied during a condition node evaluation in a flow execution.

    Flow executions is in preview release for Amazon Bedrock and is subject to change.

    ", + "sensitive":true + }, + "SatisfiedConditions":{ + "type":"list", + "member":{"shape":"SatisfiedCondition"}, + "max":5, + "min":1 + }, "SearchType":{ "type":"string", "enum":[ @@ -6580,6 +7502,91 @@ "box":true, "min":0 }, + "StartFlowExecutionRequest":{ + "type":"structure", + "required":[ + "flowAliasIdentifier", + "flowIdentifier", + "inputs" + ], + "members":{ + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias to use for the flow execution.

    ", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowExecutionName":{ + "shape":"FlowExecutionName", + "documentation":"

    The unique name for the flow execution. If you don't provide one, a system-generated name is used.

    " + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow to execute.

    ", + "location":"uri", + "locationName":"flowIdentifier" + }, + "inputs":{ + "shape":"FlowInputs", + "documentation":"

    The input data required for the flow execution. This must match the input schema defined in the flow.

    " + }, + "modelPerformanceConfiguration":{ + "shape":"ModelPerformanceConfiguration", + "documentation":"

    The performance settings for the foundation model used in the flow execution.

    " + } + } + }, + "StartFlowExecutionResponse":{ + "type":"structure", + "members":{ + "executionArn":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution.

    " + } + } + }, + "StopFlowExecutionRequest":{ + "type":"structure", + "required":[ + "executionIdentifier", + "flowAliasIdentifier", + "flowIdentifier" + ], + "members":{ + "executionIdentifier":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The unique identifier of the flow execution to stop.

    ", + "location":"uri", + "locationName":"executionIdentifier" + }, + "flowAliasIdentifier":{ + "shape":"FlowAliasIdentifier", + "documentation":"

    The unique identifier of the flow alias used for the execution.

    ", + "location":"uri", + "locationName":"flowAliasIdentifier" + }, + "flowIdentifier":{ + "shape":"FlowIdentifier", + "documentation":"

    The unique identifier of the flow.

    ", + "location":"uri", + "locationName":"flowIdentifier" + } + } + }, + "StopFlowExecutionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "executionArn":{ + "shape":"FlowExecutionIdentifier", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the flow execution that was stopped.

    " + }, + "status":{ + "shape":"FlowExecutionStatus", + "documentation":"

    The updated status of the flow execution after the stop request. This will typically be Aborted if the execution was successfully stopped.

    " + } + } + }, "StopSequences":{ "type":"list", "member":{"shape":"String"}, @@ -7073,6 +8080,12 @@ "Verb":{ "type":"string", "sensitive":true + }, + "Version":{ + "type":"string", + "max":5, + "min":1, + "pattern":"^(DRAFT|[0-9]{0,4}[1-9][0-9]{0,4})$" } }, "documentation":"

    Contains APIs related to model invocation and querying of knowledge bases.

    " diff --git a/services/bedrockdataautomation/pom.xml b/services/bedrockdataautomation/pom.xml index 30be1e54fdf4..f6b89102f03a 100644 --- a/services/bedrockdataautomation/pom.xml +++ b/services/bedrockdataautomation/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrockdataautomation AWS Java SDK :: Services :: Bedrock Data Automation diff --git a/services/bedrockdataautomation/src/main/resources/codegen-resources/customization.config b/services/bedrockdataautomation/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/bedrockdataautomation/src/main/resources/codegen-resources/customization.config +++ b/services/bedrockdataautomation/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/bedrockdataautomation/src/main/resources/codegen-resources/service-2.json b/services/bedrockdataautomation/src/main/resources/codegen-resources/service-2.json index a2b90ee8c8eb..5324676bac64 100644 --- a/services/bedrockdataautomation/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockdataautomation/src/main/resources/codegen-resources/service-2.json @@ -1275,7 +1275,8 @@ "enum":[ "DOCUMENT", "IMAGE", - "AUDIO" + "AUDIO", + "VIDEO" ] }, "UntagResourceRequest":{ diff --git a/services/bedrockdataautomationruntime/pom.xml b/services/bedrockdataautomationruntime/pom.xml index ffdfebc93076..3b7de00ee73c 100644 --- a/services/bedrockdataautomationruntime/pom.xml +++ b/services/bedrockdataautomationruntime/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrockdataautomationruntime AWS Java SDK :: Services :: Bedrock Data Automation Runtime diff --git a/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/customization.config 
b/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/customization.config +++ b/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/service-2.json b/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/service-2.json index 89eb0a4a44ee..027cfd252e14 100644 --- a/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockdataautomationruntime/src/main/resources/codegen-resources/service-2.json @@ -24,8 +24,8 @@ "input":{"shape":"GetDataAutomationStatusRequest"}, "output":{"shape":"GetDataAutomationStatusResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} @@ -42,8 +42,8 @@ "output":{"shape":"InvokeDataAutomationAsyncResponse"}, "errors":[ {"shape":"ServiceQuotaExceededException"}, - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], @@ -59,11 +59,11 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    List tags for an Amazon Bedrock Data Automation resource

    " }, @@ -77,11 +77,11 @@ "output":{"shape":"TagResourceResponse"}, "errors":[ {"shape":"ServiceQuotaExceededException"}, - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Tag an Amazon Bedrock Data Automation resource

    " }, @@ -94,11 +94,11 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Untag an Amazon Bedrock Data Automation resource

    " } @@ -112,6 +112,16 @@ "documentation":"

    This exception will be thrown when customer does not have access to API.

    ", "exception":true }, + "AssetProcessingConfiguration":{ + "type":"structure", + "members":{ + "video":{ + "shape":"VideoAssetProcessingConfiguration", + "documentation":"

    Video asset processing configuration

    " + } + }, + "documentation":"

    Config containing asset processing related knobs for all modalities

    " + }, "AutomationJobStatus":{ "type":"string", "documentation":"

    List of status supported by automation jobs

    ", @@ -309,6 +319,10 @@ "s3Uri":{ "shape":"S3Uri", "documentation":"

    S3 uri.

    " + }, + "assetProcessingConfiguration":{ + "shape":"AssetProcessingConfiguration", + "documentation":"

    Asset processing configuration

    " } }, "documentation":"

    Input configuration.

    " @@ -528,6 +542,34 @@ "documentation":"

    This exception will be thrown when customer reached API TPS limit.

    ", "exception":true }, + "TimestampSegment":{ + "type":"structure", + "required":[ + "startTimeMillis", + "endTimeMillis" + ], + "members":{ + "startTimeMillis":{ + "shape":"TimestampSegmentStartTimeMillisLong", + "documentation":"

    Start timestamp in milliseconds

    " + }, + "endTimeMillis":{ + "shape":"TimestampSegmentEndTimeMillisLong", + "documentation":"

    End timestamp in milliseconds

    " + } + }, + "documentation":"

    Timestamp segment

    " + }, + "TimestampSegmentEndTimeMillisLong":{ + "type":"long", + "box":true, + "min":300000 + }, + "TimestampSegmentStartTimeMillisLong":{ + "type":"long", + "box":true, + "min":0 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -551,6 +593,27 @@ }, "documentation":"

    This exception will be thrown when customer provided invalid parameters.

    ", "exception":true + }, + "VideoAssetProcessingConfiguration":{ + "type":"structure", + "members":{ + "segmentConfiguration":{ + "shape":"VideoSegmentConfiguration", + "documentation":"

    Delimits the segment of the input that will be processed

    " + } + }, + "documentation":"

    Video asset processing configuration

    " + }, + "VideoSegmentConfiguration":{ + "type":"structure", + "members":{ + "timestampSegment":{ + "shape":"TimestampSegment", + "documentation":"

    Timestamp segment

    " + } + }, + "documentation":"

    Delimits the segment of the input that will be processed

    ", + "union":true } }, "documentation":"

    Amazon Bedrock Data Automation Runtime

    " diff --git a/services/bedrockruntime/pom.xml b/services/bedrockruntime/pom.xml index bed5e93e3b6c..64c8b74be456 100644 --- a/services/bedrockruntime/pom.xml +++ b/services/bedrockruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT bedrockruntime AWS Java SDK :: Services :: Bedrock Runtime diff --git a/services/bedrockruntime/src/main/resources/codegen-resources/customization.config b/services/bedrockruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..42aa381cf2ea 100644 --- a/services/bedrockruntime/src/main/resources/codegen-resources/customization.config +++ b/services/bedrockruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,4 @@ { "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableEnvironmentBearerToken": true } diff --git a/services/bedrockruntime/src/main/resources/codegen-resources/service-2.json b/services/bedrockruntime/src/main/resources/codegen-resources/service-2.json index d38e1adb49af..42c931b1a59d 100644 --- a/services/bedrockruntime/src/main/resources/codegen-resources/service-2.json +++ b/services/bedrockruntime/src/main/resources/codegen-resources/service-2.json @@ -2,7 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-09-30", - "auth":["aws.auth#sigv4"], + "auth":[ + "aws.auth#sigv4", + "smithy.api#httpBearerAuth" + ], "endpointPrefix":"bedrock-runtime", "protocol":"rest-json", "protocolSettings":{"h2":"optional"}, @@ -468,6 +471,134 @@ "type":"string", "enum":["default"] }, + "Citation":{ + "type":"structure", + "members":{ + "title":{ + "shape":"String", + "documentation":"

    The title or identifier of the source document being cited.

    " + }, + "sourceContent":{ + "shape":"CitationSourceContentList", + "documentation":"

    The specific content from the source document that was referenced or cited in the generated response.

    " + }, + "location":{ + "shape":"CitationLocation", + "documentation":"

    The precise location within the source document where the cited content can be found, including character positions, page numbers, or chunk identifiers.

    " + } + }, + "documentation":"

    Contains information about a citation that references a specific source document. Citations provide traceability between the model's generated response and the source documents that informed that response.

    " + }, + "CitationGeneratedContent":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

    The text content that was generated by the model and is supported by the associated citation.

    " + } + }, + "documentation":"

    Contains the generated text content that corresponds to or is supported by a citation from a source document.

    ", + "union":true + }, + "CitationGeneratedContentList":{ + "type":"list", + "member":{"shape":"CitationGeneratedContent"} + }, + "CitationLocation":{ + "type":"structure", + "members":{ + "documentChar":{ + "shape":"DocumentCharLocation", + "documentation":"

    The character-level location within the document where the cited content is found.

    " + }, + "documentPage":{ + "shape":"DocumentPageLocation", + "documentation":"

    The page-level location within the document where the cited content is found.

    " + }, + "documentChunk":{ + "shape":"DocumentChunkLocation", + "documentation":"

    The chunk-level location within the document where the cited content is found, typically used for documents that have been segmented into logical chunks.

    " + } + }, + "documentation":"

    Specifies the precise location within a source document where cited content can be found. This can include character-level positions, page numbers, or document chunks depending on the document type and indexing method.

    ", + "union":true + }, + "CitationSourceContent":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

    The text content from the source document that is being cited.

    " + } + }, + "documentation":"

    Contains the actual text content from a source document that is being cited or referenced in the model's response.

    ", + "union":true + }, + "CitationSourceContentDelta":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

    An incremental update to the text content from the source document that is being cited.

    " + } + }, + "documentation":"

    Contains incremental updates to the source content text during streaming responses, allowing clients to build up the cited content progressively.

    " + }, + "CitationSourceContentList":{ + "type":"list", + "member":{"shape":"CitationSourceContent"} + }, + "CitationSourceContentListDelta":{ + "type":"list", + "member":{"shape":"CitationSourceContentDelta"} + }, + "Citations":{ + "type":"list", + "member":{"shape":"Citation"} + }, + "CitationsConfig":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{ + "shape":"Boolean", + "documentation":"

    Specifies whether document citations should be included in the model's response. When set to true, the model can generate citations that reference the source documents used to inform the response.

    " + } + }, + "documentation":"

    Configuration settings for enabling and controlling document citations in Converse API responses. When enabled, the model can include citation information that links generated content back to specific source documents.

    " + }, + "CitationsContentBlock":{ + "type":"structure", + "members":{ + "content":{ + "shape":"CitationGeneratedContentList", + "documentation":"

    The generated content that is supported by the associated citations.

    " + }, + "citations":{ + "shape":"Citations", + "documentation":"

    An array of citations that reference the source documents used to generate the associated content.

    " + } + }, + "documentation":"

    A content block that contains both generated text and associated citation information. This block type is returned when document citations are enabled, providing traceability between the generated content and the source documents that informed the response.

    " + }, + "CitationsDelta":{ + "type":"structure", + "members":{ + "title":{ + "shape":"String", + "documentation":"

    The title or identifier of the source document being cited.

    " + }, + "sourceContent":{ + "shape":"CitationSourceContentListDelta", + "documentation":"

    The specific content from the source document that was referenced or cited in the generated response.

    " + }, + "location":{ + "shape":"CitationLocation", + "documentation":"

    Specifies the precise location within a source document where cited content can be found. This can include character-level positions, page numbers, or document chunks depending on the document type and indexing method.

    " + } + }, + "documentation":"

    Contains incremental updates to citation information during streaming responses. This allows clients to build up citation data progressively as the response is generated.

    " + }, "ConflictException":{ "type":"structure", "members":{ @@ -509,7 +640,7 @@ }, "guardContent":{ "shape":"GuardrailConverseContentBlock", - "documentation":"

    Contains the content to assess with the guardrail. If you don't specify guardContent in a call to the Converse API, the guardrail (if passed in the Converse API) assesses the entire message.

    For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.

     </p> 
    " + "documentation":"

    Contains the content to assess with the guardrail. If you don't specify guardContent in a call to the Converse API, the guardrail (if passed in the Converse API) assesses the entire message.

    For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.

    " }, "cachePoint":{ "shape":"CachePointBlock", @@ -518,6 +649,10 @@ "reasoningContent":{ "shape":"ReasoningContentBlock", "documentation":"

    Contains content regarding the reasoning that is carried out by the model. Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response.

    " + }, + "citationsContent":{ + "shape":"CitationsContentBlock", + "documentation":"

    A content block that contains both generated text and associated citation information, providing traceability between the response and source documents.

    " } }, "documentation":"

    A block of content for a message that you pass to, or receive from, a model with the Converse or ConverseStream API operations.

    ", @@ -537,6 +672,10 @@ "reasoningContent":{ "shape":"ReasoningContentBlockDelta", "documentation":"

    Contains content regarding the reasoning that is carried out by the model. Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response.

    " + }, + "citation":{ + "shape":"CitationsDelta", + "documentation":"

    Incremental citation information that is streamed as part of the response generation process.

    " } }, "documentation":"

    A block of content in a streaming response.

    ", @@ -943,7 +1082,6 @@ "DocumentBlock":{ "type":"structure", "required":[ - "format", "name", "source" ], @@ -959,6 +1097,14 @@ "source":{ "shape":"DocumentSource", "documentation":"

    Contains the content of the document.

    " + }, + "context":{ + "shape":"String", + "documentation":"

    Contextual information about how the document should be processed or interpreted by the model when generating citations.

    " + }, + "citations":{ + "shape":"CitationsConfig", + "documentation":"

    Configuration settings that control how citations should be generated for this specific document.

    " } }, "documentation":"

    A document to include in a message.

    " @@ -968,6 +1114,87 @@ "max":200, "min":1 }, + "DocumentCharLocation":{ + "type":"structure", + "members":{ + "documentIndex":{ + "shape":"DocumentCharLocationDocumentIndexInteger", + "documentation":"

    The index of the document within the array of documents provided in the request.

    " + }, + "start":{ + "shape":"DocumentCharLocationStartInteger", + "documentation":"

    The starting character position of the cited content within the document.

    " + }, + "end":{ + "shape":"DocumentCharLocationEndInteger", + "documentation":"

    The ending character position of the cited content within the document.

    " + } + }, + "documentation":"

    Specifies a character-level location within a document, providing precise positioning information for cited content using start and end character indices.

    " + }, + "DocumentCharLocationDocumentIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentCharLocationEndInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentCharLocationStartInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentChunkLocation":{ + "type":"structure", + "members":{ + "documentIndex":{ + "shape":"DocumentChunkLocationDocumentIndexInteger", + "documentation":"

    The index of the document within the array of documents provided in the request.

    " + }, + "start":{ + "shape":"DocumentChunkLocationStartInteger", + "documentation":"

    The starting chunk identifier or index of the cited content within the document.

    " + }, + "end":{ + "shape":"DocumentChunkLocationEndInteger", + "documentation":"

    The ending chunk identifier or index of the cited content within the document.

    " + } + }, + "documentation":"

    Specifies a chunk-level location within a document, providing positioning information for cited content using logical document segments or chunks.

    " + }, + "DocumentChunkLocationDocumentIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentChunkLocationEndInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentChunkLocationStartInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

    The text content of the document.

    " + } + }, + "documentation":"

    Contains the actual content of a document that can be processed by the model and potentially cited in the response.

    ", + "union":true + }, + "DocumentContentBlocks":{ + "type":"list", + "member":{"shape":"DocumentContentBlock"} + }, "DocumentFormat":{ "type":"string", "enum":[ @@ -982,6 +1209,39 @@ "md" ] }, + "DocumentPageLocation":{ + "type":"structure", + "members":{ + "documentIndex":{ + "shape":"DocumentPageLocationDocumentIndexInteger", + "documentation":"

    The index of the document within the array of documents provided in the request.

    " + }, + "start":{ + "shape":"DocumentPageLocationStartInteger", + "documentation":"

    The starting page number of the cited content within the document.

    " + }, + "end":{ + "shape":"DocumentPageLocationEndInteger", + "documentation":"

    The ending page number of the cited content within the document.

    " + } + }, + "documentation":"

    Specifies a page-level location within a document, providing positioning information for cited content using page numbers.

    " + }, + "DocumentPageLocationDocumentIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentPageLocationEndInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "DocumentPageLocationStartInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "DocumentSource":{ "type":"structure", "members":{ @@ -992,6 +1252,14 @@ "s3Location":{ "shape":"S3Location", "documentation":"

    The location of a document object in an Amazon S3 bucket. To see which models support S3 uploads, see Supported models and features for Converse.

    " + }, + "text":{ + "shape":"String", + "documentation":"

    The text content of the document source.

    " + }, + "content":{ + "shape":"DocumentContentBlocks", + "documentation":"

    The structured content of the document source, which may include various content blocks such as text, images, or other document elements.

    " } }, "documentation":"

    Contains the content of a document.

    ", @@ -2102,7 +2370,7 @@ }, "modelId":{ "shape":"InvokeModelIdentifier", - "documentation":"

    The unique identifier of the model to invoke to run inference.

    The modelId to provide depends on the type of model or throughput that you use:

    ", + "documentation":"

    The unique identifier of the model to invoke to run inference.

    The modelId to provide depends on the type of model or throughput that you use:

    ", "location":"uri", "locationName":"modelId" }, @@ -2258,7 +2526,7 @@ }, "modelId":{ "shape":"InvokeModelIdentifier", - "documentation":"

    The unique identifier of the model to invoke to run inference.

    The modelId to provide depends on the type of model or throughput that you use:

    ", + "documentation":"

    The unique identifier of the model to invoke to run inference.

    The modelId to provide depends on the type of model or throughput that you use:

    ", "location":"uri", "locationName":"modelId" }, diff --git a/services/billing/pom.xml b/services/billing/pom.xml index 82acb225bdcf..7db1a153478a 100644 --- a/services/billing/pom.xml +++ b/services/billing/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT billing AWS Java SDK :: Services :: Billing diff --git a/services/billing/src/main/resources/codegen-resources/customization.config b/services/billing/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/billing/src/main/resources/codegen-resources/customization.config +++ b/services/billing/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/billingconductor/pom.xml b/services/billingconductor/pom.xml index 84010a5a7035..ef21d4439e9f 100644 --- a/services/billingconductor/pom.xml +++ b/services/billingconductor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT billingconductor AWS Java SDK :: Services :: Billingconductor diff --git a/services/billingconductor/src/main/resources/codegen-resources/customization.config b/services/billingconductor/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/billingconductor/src/main/resources/codegen-resources/customization.config +++ b/services/billingconductor/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/braket/pom.xml b/services/braket/pom.xml index ea9bd3a9a6a5..ceb39a6c813a 100644 --- a/services/braket/pom.xml +++ b/services/braket/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT braket AWS Java SDK :: Services :: Braket diff --git 
a/services/braket/src/main/resources/codegen-resources/customization.config b/services/braket/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/braket/src/main/resources/codegen-resources/customization.config +++ b/services/braket/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/budgets/pom.xml b/services/budgets/pom.xml index 7b41ec7838e2..49284bc63f02 100644 --- a/services/budgets/pom.xml +++ b/services/budgets/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT budgets AWS Java SDK :: Services :: AWS Budgets diff --git a/services/budgets/src/main/resources/codegen-resources/customization.config b/services/budgets/src/main/resources/codegen-resources/customization.config index 751bc41d283c..6afa5b2bbf61 100644 --- a/services/budgets/src/main/resources/codegen-resources/customization.config +++ b/services/budgets/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ ] } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chatbot/pom.xml b/services/chatbot/pom.xml index e4df3442a198..bd30162cefc0 100644 --- a/services/chatbot/pom.xml +++ b/services/chatbot/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chatbot AWS Java SDK :: Services :: Chatbot diff --git a/services/chatbot/src/main/resources/codegen-resources/customization.config b/services/chatbot/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chatbot/src/main/resources/codegen-resources/customization.config +++ b/services/chatbot/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 
@@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chime/pom.xml b/services/chime/pom.xml index fe6e79c61f13..f0a325ac72ef 100644 --- a/services/chime/pom.xml +++ b/services/chime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chime AWS Java SDK :: Services :: Chime diff --git a/services/chime/src/main/resources/codegen-resources/customization.config b/services/chime/src/main/resources/codegen-resources/customization.config index bfe46aa45227..0e77f570c68a 100644 --- a/services/chime/src/main/resources/codegen-resources/customization.config +++ b/services/chime/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "listAccounts" ], "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chimesdkidentity/pom.xml b/services/chimesdkidentity/pom.xml index 36b3a1b1bf04..317649190033 100644 --- a/services/chimesdkidentity/pom.xml +++ b/services/chimesdkidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chimesdkidentity AWS Java SDK :: Services :: Chime SDK Identity diff --git a/services/chimesdkidentity/src/main/resources/codegen-resources/customization.config b/services/chimesdkidentity/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chimesdkidentity/src/main/resources/codegen-resources/customization.config +++ b/services/chimesdkidentity/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chimesdkmediapipelines/pom.xml b/services/chimesdkmediapipelines/pom.xml index 
551aa771a5b8..7873d08b1ab9 100644 --- a/services/chimesdkmediapipelines/pom.xml +++ b/services/chimesdkmediapipelines/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chimesdkmediapipelines AWS Java SDK :: Services :: Chime SDK Media Pipelines diff --git a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config +++ b/services/chimesdkmediapipelines/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chimesdkmeetings/pom.xml b/services/chimesdkmeetings/pom.xml index 6b5b3404c05d..026de720cc79 100644 --- a/services/chimesdkmeetings/pom.xml +++ b/services/chimesdkmeetings/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chimesdkmeetings AWS Java SDK :: Services :: Chime SDK Meetings diff --git a/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config b/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config +++ b/services/chimesdkmeetings/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chimesdkmessaging/pom.xml b/services/chimesdkmessaging/pom.xml index 14dffa58c3e2..48c797841497 100644 --- a/services/chimesdkmessaging/pom.xml +++ b/services/chimesdkmessaging/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chimesdkmessaging AWS Java SDK :: Services :: Chime SDK Messaging diff --git a/services/chimesdkmessaging/src/main/resources/codegen-resources/customization.config b/services/chimesdkmessaging/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chimesdkmessaging/src/main/resources/codegen-resources/customization.config +++ b/services/chimesdkmessaging/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/chimesdkvoice/pom.xml b/services/chimesdkvoice/pom.xml index 3b9183c9fc0d..3f47d58fc20d 100644 --- a/services/chimesdkvoice/pom.xml +++ b/services/chimesdkvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT chimesdkvoice AWS Java SDK :: Services :: Chime SDK Voice diff --git a/services/chimesdkvoice/src/main/resources/codegen-resources/customization.config b/services/chimesdkvoice/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/chimesdkvoice/src/main/resources/codegen-resources/customization.config +++ b/services/chimesdkvoice/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cleanrooms/pom.xml b/services/cleanrooms/pom.xml index 231897d604b2..619ecee3c3e3 100644 --- a/services/cleanrooms/pom.xml +++ b/services/cleanrooms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cleanrooms AWS Java SDK :: Services :: Clean Rooms diff --git a/services/cleanrooms/src/main/resources/codegen-resources/customization.config 
b/services/cleanrooms/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/cleanrooms/src/main/resources/codegen-resources/customization.config +++ b/services/cleanrooms/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cleanroomsml/pom.xml b/services/cleanroomsml/pom.xml index b884429727d8..da25c402d05e 100644 --- a/services/cleanroomsml/pom.xml +++ b/services/cleanroomsml/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cleanroomsml AWS Java SDK :: Services :: Clean Rooms ML diff --git a/services/cleanroomsml/src/main/resources/codegen-resources/customization.config b/services/cleanroomsml/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/cleanroomsml/src/main/resources/codegen-resources/customization.config +++ b/services/cleanroomsml/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cleanroomsml/src/main/resources/codegen-resources/paginators-1.json b/services/cleanroomsml/src/main/resources/codegen-resources/paginators-1.json index 4e90779d1908..dc9278dd214e 100644 --- a/services/cleanroomsml/src/main/resources/codegen-resources/paginators-1.json +++ b/services/cleanroomsml/src/main/resources/codegen-resources/paginators-1.json @@ -78,6 +78,12 @@ "limit_key": "maxResults", "result_key": "trainedModelInferenceJobs" }, + "ListTrainedModelVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "trainedModels" + }, "ListTrainedModels": { "input_token": "nextToken", "output_token": 
"nextToken", diff --git a/services/cleanroomsml/src/main/resources/codegen-resources/service-2.json b/services/cleanroomsml/src/main/resources/codegen-resources/service-2.json index d9804c45da72..90a06c3fb1d4 100644 --- a/services/cleanroomsml/src/main/resources/codegen-resources/service-2.json +++ b/services/cleanroomsml/src/main/resources/codegen-resources/service-2.json @@ -22,10 +22,11 @@ }, "input":{"shape":"CancelTrainedModelRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Submits a request to cancel the trained model job.

    ", "idempotent":true @@ -39,10 +40,11 @@ }, "input":{"shape":"CancelTrainedModelInferenceJobRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Submits a request to cancel a trained model inference job.

    ", "idempotent":true @@ -57,8 +59,8 @@ "input":{"shape":"CreateAudienceModelRequest"}, "output":{"shape":"CreateAudienceModelResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"} @@ -76,8 +78,8 @@ "input":{"shape":"CreateConfiguredAudienceModelRequest"}, "output":{"shape":"CreateConfiguredAudienceModelResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"} @@ -95,8 +97,8 @@ "input":{"shape":"CreateConfiguredModelAlgorithmRequest"}, "output":{"shape":"CreateConfiguredModelAlgorithmResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], @@ -113,10 +115,11 @@ "input":{"shape":"CreateConfiguredModelAlgorithmAssociationRequest"}, "output":{"shape":"CreateConfiguredModelAlgorithmAssociationResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Associates a configured model algorithm to a collaboration for use by any member of the collaboration.

    ", @@ -132,10 +135,11 @@ "input":{"shape":"CreateMLInputChannelRequest"}, "output":{"shape":"CreateMLInputChannelResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Provides the information to create an ML input channel. An ML input channel is the result of a query that can be used for ML modeling.

    ", @@ -151,10 +155,12 @@ "input":{"shape":"CreateTrainedModelRequest"}, "output":{"shape":"CreateTrainedModelResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Creates a trained model from an associated configured model algorithm using data from any member of the collaboration.

    ", @@ -170,8 +176,8 @@ "input":{"shape":"CreateTrainingDatasetRequest"}, "output":{"shape":"CreateTrainingDatasetResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"} ], "documentation":"

    Defines the information necessary to create a training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation.

    ", @@ -186,8 +192,8 @@ }, "input":{"shape":"DeleteAudienceGenerationJobRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -203,8 +209,8 @@ }, "input":{"shape":"DeleteAudienceModelRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -220,8 +226,8 @@ }, "input":{"shape":"DeleteConfiguredAudienceModelRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -253,8 +259,8 @@ }, "input":{"shape":"DeleteConfiguredModelAlgorithmRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -270,10 +276,11 @@ }, "input":{"shape":"DeleteConfiguredModelAlgorithmAssociationRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Deletes a configured model algorithm association.

    ", "idempotent":true @@ -289,7 +296,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Deletes a ML modeling configuration.

    ", "idempotent":true @@ -303,10 +311,11 @@ }, "input":{"shape":"DeleteMLInputChannelDataRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Provides the information necessary to delete an ML input channel.

    ", "idempotent":true @@ -320,12 +329,13 @@ }, "input":{"shape":"DeleteTrainedModelOutputRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], - "documentation":"

    Deletes the output of a trained model.

    ", + "documentation":"

    Deletes the model artifacts stored by the service.

    ", "idempotent":true }, "DeleteTrainingDataset":{ @@ -337,8 +347,8 @@ }, "input":{"shape":"DeleteTrainingDatasetRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -389,7 +399,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about the configured model algorithm association in a collaboration.

    " }, @@ -405,7 +416,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a specific ML input channel in a collaboration.

    " }, @@ -421,7 +433,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a trained model in a collaboration.

    " }, @@ -485,7 +498,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a configured model algorithm association.

    " }, @@ -501,7 +515,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a specific ML configuration.

    " }, @@ -517,7 +532,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about an ML input channel.

    " }, @@ -533,7 +549,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a trained model.

    " }, @@ -549,7 +566,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns information about a trained model inference job.

    " }, @@ -625,7 +643,8 @@ "output":{"shape":"ListCollaborationConfiguredModelAlgorithmAssociationsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of the configured model algorithm associations in a collaboration.

    " }, @@ -640,7 +659,8 @@ "output":{"shape":"ListCollaborationMLInputChannelsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of the ML input channels in a collaboration.

    " }, @@ -655,7 +675,8 @@ "output":{"shape":"ListCollaborationTrainedModelExportJobsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of the export jobs for a trained model in a collaboration.

    " }, @@ -670,7 +691,8 @@ "output":{"shape":"ListCollaborationTrainedModelInferenceJobsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of trained model inference jobs in a specified collaboration.

    " }, @@ -685,7 +707,8 @@ "output":{"shape":"ListCollaborationTrainedModelsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of the trained models in a collaboration.

    " }, @@ -715,7 +738,8 @@ "output":{"shape":"ListConfiguredModelAlgorithmAssociationsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of configured model algorithm associations.

    " }, @@ -745,7 +769,8 @@ "output":{"shape":"ListMLInputChannelsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of ML input channels.

    " }, @@ -776,10 +801,28 @@ "output":{"shape":"ListTrainedModelInferenceJobsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of trained model inference jobs that match the request parameters.

    " }, + "ListTrainedModelVersions":{ + "name":"ListTrainedModelVersions", + "http":{ + "method":"GET", + "requestUri":"/memberships/{membershipIdentifier}/trained-models/{trainedModelArn}/versions", + "responseCode":200 + }, + "input":{"shape":"ListTrainedModelVersionsRequest"}, + "output":{"shape":"ListTrainedModelVersionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a list of trained model versions for a specified trained model. This operation allows you to view all versions of a trained model, including information about their status and creation details. You can use this to track the evolution of your trained models and select specific versions for inference or further training.

    " + }, "ListTrainedModels":{ "name":"ListTrainedModels", "http":{ @@ -791,7 +834,8 @@ "output":{"shape":"ListTrainedModelsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns a list of trained models.

    " }, @@ -837,7 +881,8 @@ "input":{"shape":"PutMLConfigurationRequest"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Assigns information about an ML configuration.

    ", "idempotent":true @@ -851,8 +896,8 @@ }, "input":{"shape":"StartAudienceExportJobRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"} @@ -870,10 +915,11 @@ "input":{"shape":"StartAudienceGenerationJobRequest"}, "output":{"shape":"StartAudienceGenerationJobResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Information necessary to start the audience generation job.

    ", @@ -888,10 +934,11 @@ }, "input":{"shape":"StartTrainedModelExportJobRequest"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Provides the information necessary to start a trained model export job.

    ", "idempotent":true @@ -906,10 +953,11 @@ "input":{"shape":"StartTrainedModelInferenceJobRequest"}, "output":{"shape":"StartTrainedModelInferenceJobResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Defines the information necessary to begin a trained model inference job.

    ", @@ -958,8 +1006,8 @@ "input":{"shape":"UpdateConfiguredAudienceModelRequest"}, "output":{"shape":"UpdateConfiguredAudienceModelResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1348,6 +1396,12 @@ "documentation":"

    The Amazon Resource Name (ARN) of the trained model job that you want to cancel.

    ", "location":"uri", "locationName":"trainedModelArn" + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to cancel. This parameter allows you to specify which version of the trained model you want to cancel when multiple versions exist.

    If versionIdentifier is not specified, the base model will be cancelled.

    ", + "location":"querystring", + "locationName":"versionIdentifier" } } }, @@ -1522,6 +1576,10 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model that is being exported.

    " }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model that was exported in this job.

    " + }, "membershipIdentifier":{ "shape":"UUID", "documentation":"

    The membership ID of the member that created the trained model export job.

    " @@ -1568,6 +1626,10 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model that is used for the trained model inference job.

    " }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model that was used for inference in this job.

    " + }, "collaborationIdentifier":{ "shape":"UUID", "documentation":"

    The collaboration ID of the collaboration that contains the trained model inference job.

    " @@ -1653,6 +1715,14 @@ "shape":"NameString", "documentation":"

    The name of the trained model.

    " }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of this trained model version.

    " + }, + "incrementalTrainingDataChannels":{ + "shape":"IncrementalTrainingDataChannelsOutput", + "documentation":"

    Information about the incremental training data channels used to create this version of the trained model.

    " + }, "description":{ "shape":"ResourceDescription", "documentation":"

    The description of the trained model.

    " @@ -1817,7 +1887,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/)?configured-model-algorithm-association/[-a-zA-Z0-9_/.]+" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/configured-model-algorithm-association/[-a-zA-Z0-9_/.]+" }, "ConfiguredModelAlgorithmAssociationList":{ "type":"list", @@ -1937,7 +2007,7 @@ "members":{ "imageUri":{ "shape":"AlgorithmImage", - "documentation":"

    The registry path of the docker image that contains the algorithm. Clean Rooms ML supports both registry/repository[:tag] and registry/repositry[@digest] image path formats. For more information about using images in Clean Rooms ML, see the Sagemaker API reference.

    " + "documentation":"

    The registry path of the docker image that contains the algorithm. Clean Rooms ML currently only supports the registry/repository[:tag] image path format. For more information about using images in Clean Rooms ML, see the Sagemaker API reference.

    " }, "entrypoint":{ "shape":"ContainerEntrypoint", @@ -2271,9 +2341,17 @@ "shape":"StoppingCondition", "documentation":"

    The criteria that is used to stop model training.

    " }, + "incrementalTrainingDataChannels":{ + "shape":"IncrementalTrainingDataChannels", + "documentation":"

    Specifies the incremental training data channels for the trained model.

    Incremental training allows you to create a new trained model with updates without retraining from scratch. You can specify up to one incremental training data channel that references a previously trained model and its version.

    Limit: Maximum of 20 channels total (including both incrementalTrainingDataChannels and dataChannels).

    " + }, "dataChannels":{ "shape":"ModelTrainingDataChannels", - "documentation":"

    Defines the data channels that are used as input for the trained model request.

    " + "documentation":"

    Defines the data channels that are used as input for the trained model request.

    Limit: Maximum of 20 channels total (including both dataChannels and incrementalTrainingDataChannels).

    " + }, + "trainingInputMode":{ + "shape":"TrainingInputMode", + "documentation":"

    The input mode for accessing the training data. This parameter determines how the training data is made available to the training algorithm. Valid values are:

    • File - The training data is downloaded to the training instance and made available as files.

    • FastFile - The training data is streamed directly from Amazon S3 to the training algorithm, providing faster access for large datasets.

    • Pipe - The training data is streamed to the training algorithm using named pipes, which can improve performance for certain algorithms.

    " }, "description":{ "shape":"ResourceDescription", @@ -2296,6 +2374,10 @@ "trainedModelArn":{ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model.

    " + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The unique version identifier assigned to the newly created trained model. This identifier can be used to reference this specific version of the trained model in subsequent operations such as inference jobs or incremental training.

    The initial version identifier for the base version of the trained model is \"NULL\".

    " } } }, @@ -2538,6 +2620,12 @@ "documentation":"

    The membership ID of the member that is deleting the trained model output.

    ", "location":"uri", "locationName":"membershipIdentifier" + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to delete. If not specified, the operation will delete the base version of the trained model. When specified, only the particular version will be deleted.

    ", + "location":"querystring", + "locationName":"versionIdentifier" } } }, @@ -2934,6 +3022,12 @@ "documentation":"

    The collaboration ID that contains the trained model that you want to return information about.

    ", "location":"uri", "locationName":"collaborationIdentifier" + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to retrieve. If not specified, the operation returns information about the latest version of the trained model.

    ", + "location":"querystring", + "locationName":"versionIdentifier" } } }, @@ -2963,6 +3057,14 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model.

    " }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model. This unique identifier distinguishes this version from other versions of the same trained model.

    " + }, + "incrementalTrainingDataChannels":{ + "shape":"IncrementalTrainingDataChannelsOutput", + "documentation":"

    Information about the incremental training data channels used to create this version of the trained model. This includes details about the base model that was used for incremental training and the channel configuration.

    " + }, "name":{ "shape":"NameString", "documentation":"

    The name of the trained model.

    " @@ -2984,6 +3086,10 @@ "shape":"ResourceConfig", "documentation":"

    The EC2 resource configuration that was used to train this model.

    " }, + "trainingInputMode":{ + "shape":"TrainingInputMode", + "documentation":"

    The input mode that was used for accessing the training data when this trained model was created. This indicates how the training data was made available to the training algorithm.

    " + }, "stoppingCondition":{ "shape":"StoppingCondition", "documentation":"

    The stopping condition that determined when model training ended.

    " @@ -3514,6 +3620,10 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) for the trained model that was used for the trained model inference job.

    " }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model used for this inference job. This identifies the specific version of the trained model that was used to generate the inference results.

    " + }, "resourceConfig":{ "shape":"InferenceResourceConfig", "documentation":"

    The resource configuration information for the trained model inference job.

    " @@ -3591,6 +3701,12 @@ "documentation":"

    The membership ID of the member that created the trained model that you are interested in.

    ", "location":"uri", "locationName":"membershipIdentifier" + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to retrieve. If not specified, the operation returns information about the latest version of the trained model.

    ", + "location":"querystring", + "locationName":"versionIdentifier" } } }, @@ -3620,6 +3736,14 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model.

    " }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model. This unique identifier distinguishes this version from other versions of the same trained model.

    " + }, + "incrementalTrainingDataChannels":{ + "shape":"IncrementalTrainingDataChannelsOutput", + "documentation":"

    Information about the incremental training data channels used to create this version of the trained model. This includes details about the base model that was used for incremental training and the channel configuration.

    " + }, "name":{ "shape":"NameString", "documentation":"

    The name of the trained model.

    " @@ -3641,6 +3765,10 @@ "shape":"ResourceConfig", "documentation":"

    The EC2 resource configuration that was used to create the trained model.

    " }, + "trainingInputMode":{ + "shape":"TrainingInputMode", + "documentation":"

    The input mode that was used for accessing the training data when this trained model was created. This indicates how the training data was made available to the training algorithm.

    " + }, "stoppingCondition":{ "shape":"StoppingCondition", "documentation":"

    The stopping condition that was used to terminate model training.

    " @@ -3822,13 +3950,69 @@ "min":20, "pattern":"arn:aws[-a-z]*:iam::[0-9]{12}:role/.+" }, + "IncrementalTrainingDataChannel":{ + "type":"structure", + "required":[ + "trainedModelArn", + "channelName" + ], + "members":{ + "trainedModelArn":{ + "shape":"TrainedModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the base trained model to use for incremental training. This model serves as the starting point for the incremental training process.

    " + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the base trained model to use for incremental training. If not specified, the latest version of the trained model is used.

    " + }, + "channelName":{ + "shape":"ModelTrainingDataChannelName", + "documentation":"

    The name of the incremental training data channel. This name is used to identify the channel during the training process and must be unique within the training job.

    " + } + }, + "documentation":"

    Defines an incremental training data channel that references a previously trained model. Incremental training allows you to update an existing trained model with new data, building upon the knowledge from a base model rather than training from scratch. This can significantly reduce training time and computational costs while improving model performance with additional data.

    " + }, + "IncrementalTrainingDataChannelOutput":{ + "type":"structure", + "required":[ + "channelName", + "modelName" + ], + "members":{ + "channelName":{ + "shape":"ModelTrainingDataChannelName", + "documentation":"

    The name of the incremental training data channel that was used.

    " + }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model that was used for incremental training.

    " + }, + "modelName":{ + "shape":"NameString", + "documentation":"

    The name of the base trained model that was used for incremental training.

    " + } + }, + "documentation":"

    Contains information about an incremental training data channel that was used to create a trained model. This structure provides details about the base model and channel configuration used during incremental training.

    " + }, + "IncrementalTrainingDataChannels":{ + "type":"list", + "member":{"shape":"IncrementalTrainingDataChannel"}, + "max":1, + "min":1 + }, + "IncrementalTrainingDataChannelsOutput":{ + "type":"list", + "member":{"shape":"IncrementalTrainingDataChannelOutput"}, + "max":1, + "min":1 + }, "InferenceContainerConfig":{ "type":"structure", "required":["imageUri"], "members":{ "imageUri":{ "shape":"AlgorithmImage", - "documentation":"

    The registry path of the docker image that contains the inference algorithm. Clean Rooms ML supports both registry/repository[:tag] and registry/repositry[@digest] image path formats. For more information about using images in Clean Rooms ML, see the Sagemaker API reference.

    " + "documentation":"

    The registry path of the docker image that contains the inference algorithm. Clean Rooms ML currently only supports the registry/repository[:tag] image path format. For more information about using images in Clean Rooms ML, see the Sagemaker API reference.

    " } }, "documentation":"

    Provides configuration information for the inference container.

    " @@ -4039,7 +4223,7 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

    The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the dataSource field the input channel.

    Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the role used to run the query specified in the dataSource field of the input channel.

    Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error.

    " } }, "documentation":"

    Provides information about the data source that is used to create an ML input channel.

    " @@ -4146,6 +4330,17 @@ "ml.r5.24xlarge" ] }, + "InternalServiceException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    An internal service error occurred. Retry your request. If the problem persists, contact AWS Support.

    ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, "KmsKeyArn":{ "type":"string", "max":2048, @@ -4369,6 +4564,12 @@ "documentation":"

    The Amazon Resource Name (ARN) of the trained model that was used to create the export jobs that you are interested in.

    ", "location":"uri", "locationName":"trainedModelArn" + }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to filter export jobs by. When specified, only export jobs for this specific version of the trained model are returned.

    ", + "location":"querystring", + "locationName":"trainedModelVersionIdentifier" } } }, @@ -4413,6 +4614,12 @@ "documentation":"

    The Amazon Resource Name (ARN) of the trained model that was used to create the trained model inference jobs that you are interested in.

    ", "location":"querystring", "locationName":"trainedModelArn" + }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.

    ", + "location":"querystring", + "locationName":"trainedModelVersionIdentifier" } } }, @@ -4655,6 +4862,12 @@ "documentation":"

    The Amazon Resource Name (ARN) of a trained model that was used to create the trained model inference jobs that you are interested in.

    ", "location":"querystring", "locationName":"trainedModelArn" + }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to filter inference jobs by. When specified, only inference jobs that used this specific version of the trained model are returned.

    ", + "location":"querystring", + "locationName":"trainedModelVersionIdentifier" } } }, @@ -4672,6 +4885,59 @@ } } }, + "ListTrainedModelVersionsRequest":{ + "type":"structure", + "required":[ + "membershipIdentifier", + "trainedModelArn" + ], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token from a previous ListTrainedModelVersions request. Use this token to retrieve the next page of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of trained model versions to return in a single page. The default value is 10, and the maximum value is 100.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "membershipIdentifier":{ + "shape":"UUID", + "documentation":"

    The membership identifier for the collaboration that contains the trained model.

    ", + "location":"uri", + "locationName":"membershipIdentifier" + }, + "trainedModelArn":{ + "shape":"TrainedModelArn", + "documentation":"

    The Amazon Resource Name (ARN) of the trained model for which to list versions.

    ", + "location":"uri", + "locationName":"trainedModelArn" + }, + "status":{ + "shape":"TrainedModelStatus", + "documentation":"

    Filter the results to only include trained model versions with the specified status. Valid values include CREATE_PENDING, CREATE_IN_PROGRESS, ACTIVE, CREATE_FAILED, and others.

    ", + "location":"querystring", + "locationName":"status" + } + } + }, + "ListTrainedModelVersionsResponse":{ + "type":"structure", + "required":["trainedModels"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

    The pagination token to use in a subsequent ListTrainedModelVersions request to retrieve the next page of results. This value is null when there are no more results to return.

    " + }, + "trainedModels":{ + "shape":"TrainedModelList", + "documentation":"

    A list of trained model versions that match the specified criteria. Each entry contains summary information about a trained model version, including its version identifier, status, and creation details.

    " + } + } + }, "ListTrainedModelsRequest":{ "type":"structure", "required":["membershipIdentifier"], @@ -4971,6 +5237,10 @@ "channelName":{ "shape":"ModelTrainingDataChannelName", "documentation":"

    The name of the training data channel.

    " + }, + "s3DataDistributionType":{ + "shape":"S3DataDistributionType", + "documentation":"

    Specifies how the training data stored in Amazon S3 should be distributed to training instances. This parameter controls the data distribution strategy for the training job:

    • FullyReplicated - The entire dataset is replicated on each training instance. This is suitable for smaller datasets and algorithms that require access to the complete dataset.

    • ShardedByS3Key - The dataset is distributed across training instances based on Amazon S3 key names. This is suitable for larger datasets and distributed training scenarios where each instance processes a subset of the data.

    " } }, "documentation":"

    Information about the model training data channel. A training data channel is a named data source that the training algorithms can consume.

    " @@ -5205,7 +5475,7 @@ "ResourceConfigInstanceCountInteger":{ "type":"integer", "box":true, - "max":1, + "max":5, "min":1 }, "ResourceConfigVolumeSizeInGBInteger":{ @@ -5249,6 +5519,13 @@ }, "documentation":"

    Provides information about an Amazon S3 bucket and path.

    " }, + "S3DataDistributionType":{ + "type":"string", + "enum":[ + "FullyReplicated", + "ShardedByS3Key" + ] + }, "S3Path":{ "type":"string", "max":1285, @@ -5259,7 +5536,15 @@ "type":"structure", "required":["message"], "members":{ - "message":{"shape":"String"} + "message":{"shape":"String"}, + "quotaName":{ + "shape":"String", + "documentation":"

    The name of the service quota limit that was exceeded.

    " + }, + "quotaValue":{ + "shape":"ServiceQuotaExceededExceptionQuotaValueDouble", + "documentation":"

    The current limit on the service quota that was exceeded.

    " + } }, "documentation":"

    You have exceeded your service quota.

    ", "error":{ @@ -5268,6 +5553,12 @@ }, "exception":true }, + "ServiceQuotaExceededExceptionQuotaValueDouble":{ + "type":"double", + "box":true, + "max":100000, + "min":0 + }, "SharedAudienceMetrics":{ "type":"string", "enum":[ @@ -5365,6 +5656,10 @@ "location":"uri", "locationName":"trainedModelArn" }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to export. This specifies which version of the trained model should be exported to the specified destination.

    " + }, "membershipIdentifier":{ "shape":"UUID", "documentation":"

    The membership ID of the member that is receiving the exported trained model artifacts.

    ", @@ -5406,6 +5701,10 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job.

    " }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model to use for inference. This specifies which version of the trained model should be used to generate predictions on the input data.

    " + }, "configuredModelAlgorithmAssociationArn":{ "shape":"ConfiguredModelAlgorithmAssociationArn", "documentation":"

    The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job.

    " @@ -5547,13 +5846,54 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/(configured-model-algorithm-association|trained-model|trained-model-inference-job|ml-input-channel)|training-dataset|audience-model|configured-audience-model|audience-generation-job|configured-model-algorithm|configured-model-algorithm-association|trained-model|trained-model-inference-job)/[-a-zA-Z0-9_/.]+" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:((membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/(configured-model-algorithm-association|trained-model|trained-model-inference-job|ml-input-channel))|training-dataset|audience-model|configured-audience-model|audience-generation-job|configured-model-algorithm)/[-a-zA-Z0-9_/.]+" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

    The request was denied due to request throttling.

    ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true }, "TrainedModelArn":{ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/)?trained-model/[-a-zA-Z0-9_/.]+" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/trained-model/[-a-zA-Z0-9_/.]+" + }, + "TrainedModelArtifactMaxSize":{ + "type":"structure", + "required":[ + "unit", + "value" + ], + "members":{ + "unit":{ + "shape":"TrainedModelArtifactMaxSizeUnitType", + "documentation":"

    The unit of measurement for the maximum artifact size. Valid values include common storage units such as bytes, kilobytes, megabytes, gigabytes, and terabytes.

    " + }, + "value":{ + "shape":"TrainedModelArtifactMaxSizeValue", + "documentation":"

    The numerical value for the maximum artifact size limit. This value is interpreted according to the specified unit.

    " + } + }, + "documentation":"

    Specifies the maximum size limit for trained model artifacts. This configuration helps control storage costs and ensures that trained models don't exceed specified size constraints. The size limit applies to the total size of all artifacts produced by the training job.

    " + }, + "TrainedModelArtifactMaxSizeUnitType":{ + "type":"string", + "enum":["GB"] + }, + "TrainedModelArtifactMaxSizeValue":{ + "type":"double", + "box":true, + "max":10.0, + "min":0.01 }, "TrainedModelExportFileType":{ "type":"string", @@ -5655,7 +5995,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:(membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/)?trained-model-inference-job/[-a-zA-Z0-9_/.]+" + "pattern":"arn:aws[-a-z]*:cleanrooms-ml:[-a-z0-9]+:[0-9]{12}:membership/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/trained-model-inference-job/[-a-zA-Z0-9_/.]+" }, "TrainedModelInferenceJobList":{ "type":"list", @@ -5704,6 +6044,10 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model that is used for the trained model inference job.

    " }, + "trainedModelVersionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of the trained model that was used for inference in this job.

    " + }, "collaborationIdentifier":{ "shape":"UUID", "documentation":"

    The collaboration ID of the collaboration that contains the trained model inference job.

    " @@ -5790,7 +6134,7 @@ "TrainedModelInferenceMaxOutputSizeValue":{ "type":"double", "box":true, - "max":10.0, + "max":50.0, "min":0.01 }, "TrainedModelList":{ @@ -5838,6 +6182,14 @@ "shape":"TrainedModelArn", "documentation":"

    The Amazon Resource Name (ARN) of the trained model.

    " }, + "versionIdentifier":{ + "shape":"UUID", + "documentation":"

    The version identifier of this trained model version.

    " + }, + "incrementalTrainingDataChannels":{ + "shape":"IncrementalTrainingDataChannelsOutput", + "documentation":"

    Information about the incremental training data channels used to create this version of the trained model.

    " + }, "name":{ "shape":"NameString", "documentation":"

    The name of the trained model.

    " @@ -5875,6 +6227,10 @@ "containerMetrics":{ "shape":"MetricsConfigurationPolicy", "documentation":"

    The container for the metrics of the trained model.

    " + }, + "maxArtifactSize":{ + "shape":"TrainedModelArtifactMaxSize", + "documentation":"

    The maximum size limit for trained model artifacts as defined in the configuration policy. This setting helps enforce consistent size limits across trained models in the collaboration.

    " } }, "documentation":"

    The configuration policy for the trained models.

    " @@ -5930,6 +6286,14 @@ }, "documentation":"

    Provides information about the training dataset.

    " }, + "TrainingInputMode":{ + "type":"string", + "enum":[ + "File", + "FastFile", + "Pipe" + ] + }, "UUID":{ "type":"string", "max":36, diff --git a/services/cloud9/pom.xml b/services/cloud9/pom.xml index 1bbd4ce081b8..56c7c7dfe540 100644 --- a/services/cloud9/pom.xml +++ b/services/cloud9/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 cloud9 diff --git a/services/cloud9/src/main/resources/codegen-resources/customization.config b/services/cloud9/src/main/resources/codegen-resources/customization.config index 13aee22d7a5d..8255336b5f6b 100644 --- a/services/cloud9/src/main/resources/codegen-resources/customization.config +++ b/services/cloud9/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "describeEnvironmentMemberships", "listEnvironments" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudcontrol/pom.xml b/services/cloudcontrol/pom.xml index 8b0558d1f027..abdf73cdfea3 100644 --- a/services/cloudcontrol/pom.xml +++ b/services/cloudcontrol/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudcontrol AWS Java SDK :: Services :: Cloud Control diff --git a/services/cloudcontrol/src/main/resources/codegen-resources/customization.config b/services/cloudcontrol/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/cloudcontrol/src/main/resources/codegen-resources/customization.config +++ b/services/cloudcontrol/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/clouddirectory/pom.xml b/services/clouddirectory/pom.xml index 4c5b9f1be685..bb4ccac3cf45 100644 --- a/services/clouddirectory/pom.xml +++ 
b/services/clouddirectory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT clouddirectory AWS Java SDK :: Services :: Amazon CloudDirectory diff --git a/services/clouddirectory/src/main/resources/codegen-resources/customization.config b/services/clouddirectory/src/main/resources/codegen-resources/customization.config index 44a2fcefcbb4..45b9f344391b 100644 --- a/services/clouddirectory/src/main/resources/codegen-resources/customization.config +++ b/services/clouddirectory/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudformation/pom.xml b/services/cloudformation/pom.xml index 070743baee77..a4f30e58daf9 100644 --- a/services/cloudformation/pom.xml +++ b/services/cloudformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudformation AWS Java SDK :: Services :: AWS CloudFormation diff --git a/services/cloudformation/src/main/resources/codegen-resources/service-2.json b/services/cloudformation/src/main/resources/codegen-resources/service-2.json index 297ec54c0184..bf0bd11f3ee3 100644 --- a/services/cloudformation/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudformation/src/main/resources/codegen-resources/service-2.json @@ -167,7 +167,7 @@ {"shape":"InvalidOperationException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Creates stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region. You must specify at least one value for either Accounts or DeploymentTargets, and you must specify at least one value for Regions.

    " + "documentation":"

    Creates stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region. You must specify at least one value for either Accounts or DeploymentTargets, and you must specify at least one value for Regions.

    The maximum number of organizational units (OUs) supported by a CreateStackInstances operation is 50.

    If you need more than 50, consider the following options:

    • Batch processing: If you don't want to expose your OU hierarchy, split up the operations into multiple calls with less than 50 OUs each.

    • Parent OU strategy: If you don't mind exposing the OU hierarchy, target a parent OU that contains all desired child OUs.

    " }, "CreateStackRefactor":{ "name":"CreateStackRefactor", @@ -294,7 +294,7 @@ {"shape":"StaleRequestException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

    Deletes stack instances for the specified accounts, in the specified Amazon Web Services Regions.

    " + "documentation":"

    Deletes stack instances for the specified accounts, in the specified Amazon Web Services Regions.

    The maximum number of organizational units (OUs) supported by a DeleteStackInstances operation is 50.

    If you need more than 50, consider the following options:

    • Batch processing: If you don't want to expose your OU hierarchy, split up the operations into multiple calls with less than 50 OUs each.

    • Parent OU strategy: If you don't mind exposing the OU hierarchy, target a parent OU that contains all desired child OUs.

    " }, "DeleteStackSet":{ "name":"DeleteStackSet", @@ -554,7 +554,7 @@ "errors":[ {"shape":"StackSetNotFoundException"} ], - "documentation":"

    Returns the description of the specified StackSet.

    " + "documentation":"

    Returns the description of the specified StackSet.

    This API provides strongly consistent reads meaning it will always return the most up-to-date data.

    " }, "DescribeStackSetOperation":{ "name":"DescribeStackSetOperation", @@ -571,7 +571,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"OperationNotFoundException"} ], - "documentation":"

    Returns the description of the specified StackSet operation.

    " + "documentation":"

    Returns the description of the specified StackSet operation.

    This API provides strongly consistent reads meaning it will always return the most up-to-date data.

    " }, "DescribeStacks":{ "name":"DescribeStacks", @@ -1008,7 +1008,7 @@ {"shape":"StackSetNotFoundException"}, {"shape":"OperationNotFoundException"} ], - "documentation":"

    Returns summary information about the results of a stack set operation.

    " + "documentation":"

    Returns summary information about the results of a stack set operation.

    This API provides eventually consistent reads meaning it may take some time but will eventually return the most up-to-date data.

    " }, "ListStackSetOperations":{ "name":"ListStackSetOperations", @@ -1024,7 +1024,7 @@ "errors":[ {"shape":"StackSetNotFoundException"} ], - "documentation":"

    Returns summary information about operations performed on a stack set.

    " + "documentation":"

    Returns summary information about operations performed on a stack set.

    This API provides eventually consistent reads meaning it may take some time but will eventually return the most up-to-date data.

    " }, "ListStackSets":{ "name":"ListStackSets", @@ -1037,7 +1037,7 @@ "shape":"ListStackSetsOutput", "resultWrapper":"ListStackSetsResult" }, - "documentation":"

    Returns summary information about stack sets that are associated with the user.

    • [Self-managed permissions] If you set the CallAs parameter to SELF while signed in to your Amazon Web Services account, ListStackSets returns all self-managed stack sets in your Amazon Web Services account.

    • [Service-managed permissions] If you set the CallAs parameter to SELF while signed in to the organization's management account, ListStackSets returns all stack sets in the management account.

    • [Service-managed permissions] If you set the CallAs parameter to DELEGATED_ADMIN while signed in to your member account, ListStackSets returns all stack sets with service-managed permissions in the management account.

    " + "documentation":"

    Returns summary information about stack sets that are associated with the user.

    This API provides strongly consistent reads meaning it will always return the most up-to-date data.

    • [Self-managed permissions] If you set the CallAs parameter to SELF while signed in to your Amazon Web Services account, ListStackSets returns all self-managed stack sets in your Amazon Web Services account.

    • [Service-managed permissions] If you set the CallAs parameter to SELF while signed in to the organization's management account, ListStackSets returns all stack sets in the management account.

    • [Service-managed permissions] If you set the CallAs parameter to DELEGATED_ADMIN while signed in to your member account, ListStackSets returns all stack sets with service-managed permissions in the management account.

    " }, "ListStacks":{ "name":"ListStacks", @@ -1349,7 +1349,7 @@ {"shape":"StaleRequestException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

    Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.

    You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.

    During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.

    You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

    " + "documentation":"

    Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.

    You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to create additional stack instances, use CreateStackInstances.

    During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their overridden value.

    You can only update the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.

    The maximum number of organizational units (OUs) supported by a UpdateStackInstances operation is 50.

    If you need more than 50, consider the following options:

    • Batch processing: If you don't want to expose your OU hierarchy, split up the operations into multiple calls with less than 50 OUs each.

    • Parent OU strategy: If you don't mind exposing the OU hierarchy, target a parent OU that contains all desired child OUs.

    " }, "UpdateStackSet":{ "name":"UpdateStackSet", @@ -1370,7 +1370,7 @@ {"shape":"InvalidOperationException"}, {"shape":"StackInstanceNotFoundException"} ], - "documentation":"

    Updates the stack set and associated stack instances in the specified accounts and Amazon Web Services Regions.

    Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

    " + "documentation":"

    Updates the stack set and associated stack instances in the specified accounts and Amazon Web Services Regions.

    Even if the stack set operation created by updating the stack set fails (completely or partially, below or above a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

    The maximum number of organizational units (OUs) supported by an UpdateStackSet operation is 50.

    If you need more than 50, consider the following options:

    • Batch processing: If you don't want to expose your OU hierarchy, split up the operations into multiple calls with fewer than 50 OUs each.

    • Parent OU strategy: If you don't mind exposing the OU hierarchy, target a parent OU that contains all desired child OUs.

    " }, "UpdateTerminationProtection":{ "name":"UpdateTerminationProtection", @@ -1467,13 +1467,11 @@ }, "ActivateOrganizationsAccessInput":{ "type":"structure", - "members":{ - } + "members":{} }, "ActivateOrganizationsAccessOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "ActivateTypeInput":{ "type":"structure", @@ -1538,8 +1536,7 @@ }, "AlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource with the name requested already exists.

    ", "error":{ "code":"AlreadyExistsException", @@ -1816,8 +1813,7 @@ }, "ChangeSetNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified change set name or ID doesn't exist. To view valid change sets for a stack, use the ListChangeSets operation.

    ", "error":{ "code":"ChangeSetNotFound", @@ -1948,8 +1944,7 @@ }, "ConcurrentResourcesLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    No more than 5 generated templates can be in an InProgress or Pending status at one time. This error is also returned if you attempt to update or delete a generated template that is in an InProgress or Pending status.

    ", "error":{ "code":"ConcurrentResourcesLimitExceeded", @@ -1995,8 +1990,7 @@ }, "ContinueUpdateRollbackOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for a ContinueUpdateRollback operation.

    " }, "CreateChangeSetInput":{ @@ -2016,7 +2010,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of the file that contains the revised template. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. CloudFormation generates the change set by comparing this template with the stack that you specified. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only TemplateBody or TemplateURL.

    " + "documentation":"

    The URL of the file that contains the revised template. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. CloudFormation generates the change set by comparing this template with the stack that you specified. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    Conditional: You must specify only TemplateBody or TemplateURL.

    " }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -2028,7 +2022,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.

      If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

      For more information about macros, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " + "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect.

      If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.

      For more information about macros, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -2080,7 +2074,7 @@ }, "ImportExistingResources":{ "shape":"ImportExistingResources", - "documentation":"

    Indicates if the change set imports resources that already exist.

    This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide.

    " + "documentation":"

    Indicates if the change set auto-imports resources that already exist. For more information, see Import Amazon Web Services resources into a CloudFormation stack automatically in the CloudFormation User Guide.

    This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the ResourcesToImport parameter instead.

    " } }, "documentation":"

    The input for the CreateChangeSet action.

    " @@ -2140,11 +2134,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " + "documentation":"

    Structure that contains the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify either TemplateBody or TemplateURL, but not both.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " }, "Parameters":{ "shape":"Parameters", @@ -2168,7 +2162,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

      You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

      Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

      For more information, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " + "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.

      You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.

      Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

      For more information, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -2184,11 +2178,11 @@ }, "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

    Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " + "documentation":"

    Structure that contains the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

    Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " + "documentation":"

    Location of a file that contains the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " }, "Tags":{ "shape":"Tags", @@ -2316,11 +2310,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " + "documentation":"

    The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (maximum size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (maximum size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. S3 static website URLs are not supported.

    Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

    " }, "StackId":{ "shape":"StackId", @@ -2332,7 +2326,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

    In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Perform custom processing on CloudFormation templates with template macros.

      Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

    " + "documentation":"

    In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Perform custom processing on CloudFormation templates with template macros.

      Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

    " }, "Tags":{ "shape":"Tags", @@ -2380,8 +2374,7 @@ }, "CreatedButModifiedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified resource exists, but has been changed.

    ", "error":{ "code":"CreatedButModifiedException", @@ -2393,13 +2386,11 @@ "CreationTime":{"type":"timestamp"}, "DeactivateOrganizationsAccessInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeactivateOrganizationsAccessOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeactivateTypeInput":{ "type":"structure", @@ -2420,8 +2411,7 @@ }, "DeactivateTypeOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChangeSetInput":{ "type":"structure", @@ -2440,8 +2430,7 @@ }, "DeleteChangeSetOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for the DeleteChangeSet action.

    " }, "DeleteGeneratedTemplateInput":{ @@ -2549,8 +2538,7 @@ }, "DeleteStackSetOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletionMode":{ "type":"string", @@ -2612,8 +2600,7 @@ }, "DeregisterTypeOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAccountLimitsInput":{ "type":"structure", @@ -2802,7 +2789,7 @@ }, "ImportExistingResources":{ "shape":"ImportExistingResources", - "documentation":"

    Indicates if the change set imports resources that already exist.

    This parameter can only import resources that have custom names in templates. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead.

    " + "documentation":"

    Indicates if the change set imports resources that already exist.

    This parameter can only import resources that have custom names in templates. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead.

    " } }, "documentation":"

    The output for the DescribeChangeSet action.

    " @@ -2998,7 +2985,7 @@ }, "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

    Status of the stack's actual configuration compared to its expected configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: This value is reserved for future use.

    " + "documentation":"

    Status of the stack's actual configuration compared to its expected configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: CloudFormation could not run drift detection for a resource in the stack. See the DetectionStatusReason for details.

    " }, "DetectionStatus":{ "shape":"StackDriftDetectionStatus", @@ -3134,7 +3121,7 @@ }, "StackResourceDriftStatusFilters":{ "shape":"StackResourceDriftStatusFilters", - "documentation":"

    The resource drift status values to use as filters for the resource drift results returned.

    • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

    • MODIFIED: One or more resource properties differ from their expected template values.

    • IN_SYNC: The resource's actual configuration matches its expected template configuration.

    • NOT_CHECKED: CloudFormation doesn't currently return this value.

    " + "documentation":"

    The resource drift status values to use as filters for the resource drift results returned.

    • DELETED: The resource differs from its expected template configuration in that the resource has been deleted.

    • MODIFIED: One or more resource properties differ from their expected template values.

    • IN_SYNC: The resource's actual configuration matches its expected template configuration.

    • NOT_CHECKED: CloudFormation doesn't currently return this value.

    • UNKNOWN: CloudFormation could not run drift detection for the resource.

    " }, "NextToken":{ "shape":"NextToken", @@ -3183,7 +3170,7 @@ "members":{ "StackResourceDetail":{ "shape":"StackResourceDetail", - "documentation":"

    A StackResourceDetail structure containing the description of the specified resource in the specified stack.

    " + "documentation":"

    A StackResourceDetail structure that contains the description of the specified resource in the specified stack.

    " } }, "documentation":"

    The output for a DescribeStackResource action.

    " @@ -3596,11 +3583,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

    " + "documentation":"

    Structure that contains the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " }, "Parameters":{ "shape":"Parameters", @@ -3656,8 +3643,7 @@ }, "ExecuteChangeSetOutput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output for the ExecuteChangeSet action.

    " }, "ExecuteStackRefactorInput":{ @@ -3744,8 +3730,7 @@ }, "GeneratedTemplateNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The generated template was not found.

    ", "error":{ "code":"GeneratedTemplateNotFound", @@ -3826,7 +3811,7 @@ "members":{ "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

    Structure containing the stack policy body. (For more information, see Prevent updates to stack resources in the CloudFormation User Guide.)

    " + "documentation":"

    Structure that contains the stack policy body. (For more information, see Prevent updates to stack resources in the CloudFormation User Guide.)

    " } }, "documentation":"

    The output for the GetStackPolicy action.

    " @@ -3854,7 +3839,7 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body.

    CloudFormation returns the same template that was used when the stack was created.

    " + "documentation":"

    Structure that contains the template body.

    CloudFormation returns the same template that was used when the stack was created.

    " }, "StagesAvailable":{ "shape":"StageList", @@ -3868,11 +3853,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

    " + "documentation":"

    Structure that contains the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (max size: 1 MB) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

    " }, "StackName":{ "shape":"StackNameOrId", @@ -3934,7 +3919,7 @@ }, "Warnings":{ "shape":"Warnings", - "documentation":"

    An object containing any warnings returned.

    " + "documentation":"

    An object that contains any warnings returned.

    " } }, "documentation":"

    The output for the GetTemplateSummary action.

    " @@ -3987,8 +3972,7 @@ }, "HookResultNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified target doesn't have any requested Hook invocations.

    ", "error":{ "code":"HookResultNotFound", @@ -4150,8 +4134,7 @@ "IncludePropertyValues":{"type":"boolean"}, "InsufficientCapabilitiesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The template contains resources with capabilities that weren't specified in the Capabilities parameter.

    ", "error":{ "code":"InsufficientCapabilitiesException", @@ -4162,8 +4145,7 @@ }, "InvalidChangeSetStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified change set can't be used to update the stack. For example, the change set status might be CREATE_IN_PROGRESS, or the stack status might be UPDATE_IN_PROGRESS.

    ", "error":{ "code":"InvalidChangeSetStatus", @@ -4174,8 +4156,7 @@ }, "InvalidOperationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified operation isn't valid.

    ", "error":{ "code":"InvalidOperationException", @@ -4186,8 +4167,7 @@ }, "InvalidStateTransitionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Error reserved for use by the CloudFormation CLI. CloudFormation doesn't return this error to users.

    ", "error":{ "code":"InvalidStateTransition", @@ -4216,8 +4196,7 @@ "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The quota for the resource has already been reached.

    For information about resource and stack limitations, see CloudFormation quotas in the CloudFormation User Guide.

    ", "error":{ "code":"LimitExceededException", @@ -4862,7 +4841,7 @@ "members":{ "StackSummaries":{ "shape":"StackSummaries", - "documentation":"

    A list of StackSummary structures containing information about the specified stacks.

    " + "documentation":"

    A list of StackSummary structures that contain information about the specified stacks.

    " }, "NextToken":{ "shape":"NextToken", @@ -5074,11 +5053,11 @@ "members":{ "TypeHierarchy":{ "shape":"TypeHierarchy", - "documentation":"

    A concatenated list of the module type or types containing the resource. Module types are listed starting with the inner-most nested module, and separated by /.

    In the following example, the resource was created from a module of type AWS::First::Example::MODULE, that's nested inside a parent module of type AWS::Second::Example::MODULE.

    AWS::First::Example::MODULE/AWS::Second::Example::MODULE

    " + "documentation":"

    A concatenated list of the module type or types that contains the resource. Module types are listed starting with the inner-most nested module, and separated by /.

    In the following example, the resource was created from a module of type AWS::First::Example::MODULE, that's nested inside a parent module of type AWS::Second::Example::MODULE.

    AWS::First::Example::MODULE/AWS::Second::Example::MODULE

    " }, "LogicalIdHierarchy":{ "shape":"LogicalIdHierarchy", - "documentation":"

    A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /.

    In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB.

    moduleA/moduleB

    For more information, see Reference module resources in CloudFormation templates in the CloudFormation User Guide.

    " + "documentation":"

    A concatenated list of the logical IDs of the module or modules that contains the resource. Modules are listed starting with the inner-most nested module, and separated by /.

    In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB.

    moduleA/moduleB

    For more information, see Reference module resources in CloudFormation templates in the CloudFormation User Guide.

    " } }, "documentation":"

    Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

    For more information about modules, see Create reusable resource configurations that can be included across templates with CloudFormation modules in the CloudFormation User Guide.

    " @@ -5090,8 +5069,7 @@ }, "NameAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified name is already in use.

    ", "error":{ "code":"NameAlreadyExistsException", @@ -5134,8 +5112,7 @@ }, "OperationIdAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified operation ID already exists.

    ", "error":{ "code":"OperationIdAlreadyExistsException", @@ -5146,8 +5123,7 @@ }, "OperationInProgressException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Another operation is currently in progress for this stack set. Only one operation can be performed for a stack set at a given time.

    ", "error":{ "code":"OperationInProgressException", @@ -5158,8 +5134,7 @@ }, "OperationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified ID refers to an operation that doesn't exist.

    ", "error":{ "code":"OperationNotFoundException", @@ -5208,8 +5183,7 @@ }, "OperationStatusCheckFailedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Error reserved for use by the CloudFormation CLI. CloudFormation doesn't return this error to users.

    ", "error":{ "code":"ConditionalCheckFailed", @@ -5531,8 +5505,7 @@ }, "RecordHandlerProgressOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "RefreshAllResources":{"type":"boolean"}, "Region":{ @@ -5589,7 +5562,7 @@ }, "SchemaHandlerPackage":{ "shape":"S3Url", - "documentation":"

    A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register.

    For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation Command Line Interface (CLI) User Guide.

    The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide.

    " + "documentation":"

    A URL to the S3 bucket that contains the extension project package that contains the necessary files for the extension you want to register.

    For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation Command Line Interface (CLI) User Guide.

    The user registering the extension must be able to access the package in the S3 bucket. That is, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide.

    " }, "LoggingConfig":{ "shape":"LoggingConfig", @@ -5750,11 +5723,11 @@ }, "BeforeContext":{ "shape":"BeforeContext", - "documentation":"

    An encoded JSON string containing the context of the resource before the change is executed.

    " + "documentation":"

    An encoded JSON string that contains the context of the resource before the change is executed.

    " }, "AfterContext":{ "shape":"AfterContext", - "documentation":"

    An encoded JSON string containing the context of the resource after the change is executed.

    " + "documentation":"

    An encoded JSON string that contains the context of the resource after the change is executed.

    " } }, "documentation":"

    The ResourceChange structure describes the resource and the action that CloudFormation will perform on it if you execute this change set.

    " @@ -5943,8 +5916,7 @@ "ResourceScanId":{"type":"string"}, "ResourceScanInProgressException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A resource scan is currently in progress. Only one can be run at a time for an account in a Region.

    ", "error":{ "code":"ResourceScanInProgress", @@ -5955,8 +5927,7 @@ }, "ResourceScanLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The limit on resource scans has been exceeded. Reasons include:

    • Exceeded the daily quota for resource scans.

    • A resource scan recently failed. You must wait 10 minutes before starting a new resource scan.

    • The last resource scan failed after exceeding 100,000 resources. When this happens, you must wait 24 hours before starting a new resource scan.

    ", "error":{ "code":"ResourceScanLimitExceeded", @@ -5967,8 +5938,7 @@ }, "ResourceScanNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource scan was not found.

    ", "error":{ "code":"ResourceScanNotFound", @@ -6214,7 +6184,7 @@ "documentation":"

    The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

    The default is 0 minutes.

    If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

    If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

    " } }, - "documentation":"

    Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

    Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Roll back your CloudFormation stack on alarm breach with rollback triggers.

    " + "documentation":"

    Structure that contains the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

    Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Roll back your CloudFormation stack on alarm breach with rollback triggers.

    " }, "RollbackStackInput":{ "type":"structure", @@ -6260,7 +6230,7 @@ }, "Type":{ "shape":"Type", - "documentation":"

    The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

    " + "documentation":"

    The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

    " } }, "documentation":"

    A rollback trigger CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.

    " @@ -6286,7 +6256,7 @@ "members":{ "Types":{ "shape":"ResourceTypeFilters", - "documentation":"

    An array of strings where each string represents an Amazon Web Services resource type you want to scan. Each string defines the resource type using the format AWS::ServiceName::ResourceType, for example, AWS::DynamoDB::Table. For the full list of supported resource types, see the Resource type support table in the CloudFormation User Guide.

    To scan all resource types within a service, you can use a wildcard, represented by an asterisk (*). You can place a asterisk at only the end of the string, for example, AWS::S3::*.

    " + "documentation":"

    An array of strings where each string represents an Amazon Web Services resource type you want to scan. Each string defines the resource type using the format AWS::ServiceName::ResourceType, for example, AWS::DynamoDB::Table. For the full list of supported resource types, see the Resource type support table in the CloudFormation User Guide.

    To scan all resource types within a service, you can use a wildcard, represented by an asterisk (*). You can place an asterisk at only the end of the string, for example, AWS::S3::*.

    " } }, "documentation":"

    A filter that is used to specify which resource types to scan.

    " @@ -6362,11 +6332,11 @@ }, "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

    Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " + "documentation":"

    Structure that contains the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

    Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " + "documentation":"

    Location of a file that contains the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    " } }, "documentation":"

    The input for the SetStackPolicy action.

    " @@ -6429,8 +6399,7 @@ }, "SetTypeDefaultVersionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "SignalResourceInput":{ "type":"structure", @@ -6546,11 +6515,11 @@ }, "ParentId":{ "shape":"StackId", - "documentation":"

    For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

    For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

    " + "documentation":"

    For nested stacks, the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

    For more information, see Nested stacks in the CloudFormation User Guide.

    " }, "RootId":{ "shape":"StackId", - "documentation":"

    For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

    For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

    " + "documentation":"

    For nested stacks, the stack ID of the top-level stack to which the nested stack ultimately belongs.

    For more information, see Nested stacks in the CloudFormation User Guide.

    " }, "DriftInformation":{ "shape":"StackDriftInformation", @@ -6613,7 +6582,7 @@ "members":{ "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

    Status of the stack's actual configuration compared to its expected template configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: This value is reserved for future use.

    " + "documentation":"

    Status of the stack's actual configuration compared to its expected template configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: CloudFormation could not run drift detection for a resource in the stack.

    " }, "LastCheckTimestamp":{ "shape":"Timestamp", @@ -6628,7 +6597,7 @@ "members":{ "StackDriftStatus":{ "shape":"StackDriftStatus", - "documentation":"

    Status of the stack's actual configuration compared to its expected template configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: This value is reserved for future use.

    " + "documentation":"

    Status of the stack's actual configuration compared to its expected template configuration.

    • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.

    • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.

    • IN_SYNC: The stack's actual configuration matches its expected template configuration.

    • UNKNOWN: CloudFormation could not run drift detection for a resource in the stack.

    " }, "LastCheckTimestamp":{ "shape":"Timestamp", @@ -6721,7 +6690,7 @@ }, "DetailedStatus":{ "shape":"DetailedStatus", - "documentation":"

    An optional field containing information about the detailed status of the stack event.

    • VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly.

    " + "documentation":"

    An optional field that contains information about the detailed status of the stack event.

    • VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly.

    " } }, "documentation":"

    The StackEvent data type.

    " @@ -6857,8 +6826,7 @@ }, "StackInstanceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack instance doesn't exist.

    ", "error":{ "code":"StackInstanceNotFoundException", @@ -6986,8 +6954,7 @@ }, "StackNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack ARN doesn't exist or stack doesn't exist corresponding to the ARN in input.

    ", "error":{ "code":"StackNotFoundException", @@ -7108,8 +7075,7 @@ "StackRefactorId":{"type":"string"}, "StackRefactorNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack refactor can't be found.

    ", "error":{ "code":"StackRefactorNotFoundException", @@ -7321,11 +7287,11 @@ }, "ExpectedProperties":{ "shape":"Properties", - "documentation":"

    A JSON structure containing the expected property values of the stack resource, as defined in the stack template and any values specified as template parameters.

    For resources whose StackResourceDriftStatus is DELETED, this structure will not be present.

    " + "documentation":"

    A JSON structure that contains the expected property values of the stack resource, as defined in the stack template and any values specified as template parameters.

    For resources whose StackResourceDriftStatus is DELETED, this structure will not be present.

    " }, "ActualProperties":{ "shape":"Properties", - "documentation":"

    A JSON structure containing the actual property values of the stack resource.

    For resources whose StackResourceDriftStatus is DELETED, this structure will not be present.

    " + "documentation":"

    A JSON structure that contains the actual property values of the stack resource.

    For resources whose StackResourceDriftStatus is DELETED, this structure will not be present.

    " }, "PropertyDifferences":{ "shape":"PropertyDifferences", @@ -7333,7 +7299,7 @@ }, "StackResourceDriftStatus":{ "shape":"StackResourceDriftStatus", - "documentation":"

    Status of the resource's actual configuration compared to its expected configuration.

    • DELETED: The resource differs from its expected template configuration because the resource has been deleted.

    • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).

    • IN_SYNC: The resource's actual configuration matches its expected template configuration.

    • NOT_CHECKED: CloudFormation does not currently return this value.

    " + "documentation":"

    Status of the resource's actual configuration compared to its expected configuration.

    • DELETED: The resource differs from its expected template configuration because the resource has been deleted.

    • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).

    • IN_SYNC: The resource's actual configuration matches its expected template configuration.

    • NOT_CHECKED: CloudFormation does not currently return this value.

    • UNKNOWN: CloudFormation could not run drift detection for the resource. See the DriftStatusReason for details.

    " }, "Timestamp":{ "shape":"Timestamp", @@ -7342,6 +7308,10 @@ "ModuleInfo":{ "shape":"ModuleInfo", "documentation":"

    Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template.

    " + }, + "DriftStatusReason":{ + "shape":"StackResourceDriftStatusReason", + "documentation":"

    The reason for the drift status.

    " } }, "documentation":"

    Contains the drift information for a resource that has been checked for drift. This includes actual and expected property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection.

    Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resource type support for imports and drift detection.

    Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

    " @@ -7382,7 +7352,8 @@ "IN_SYNC", "MODIFIED", "DELETED", - "NOT_CHECKED" + "NOT_CHECKED", + "UNKNOWN" ] }, "StackResourceDriftStatusFilters":{ @@ -7391,6 +7362,7 @@ "max":4, "min":1 }, + "StackResourceDriftStatusReason":{"type":"string"}, "StackResourceDrifts":{ "type":"list", "member":{"shape":"StackResourceDrift"} @@ -7604,8 +7576,7 @@ }, "StackSetNotEmptyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You can't yet delete this stack set, because it still contains one or more stack instances. Delete all stack instances from the stack set before deleting the stack set.

    ", "error":{ "code":"StackSetNotEmptyException", @@ -7616,8 +7587,7 @@ }, "StackSetNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified stack set doesn't exist.

    ", "error":{ "code":"StackSetNotFoundException", @@ -7710,19 +7680,19 @@ }, "FailureToleranceCount":{ "shape":"FailureToleranceCount", - "documentation":"

    The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

    Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both).

    By default, 0 is specified.

    " + "documentation":"

    The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

    You can specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

    By default, 0 is specified.

    " }, "FailureTolerancePercentage":{ "shape":"FailureTolerancePercentage", - "documentation":"

    The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

    When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number.

    Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

    By default, 0 is specified.

    " + "documentation":"

    The percentage of accounts, per Region, for which this stack operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions.

    When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number.

    You can specify either FailureToleranceCount or FailureTolerancePercentage, but not both.

    By default, 0 is specified.

    " }, "MaxConcurrentCount":{ "shape":"MaxConcurrentCount", - "documentation":"

    The maximum number of accounts in which to perform this operation at one time. This can depend on the value of FailureToleranceCount depending on your ConcurrencyMode. MaxConcurrentCount is at most one more than the FailureToleranceCount if you're using STRICT_FAILURE_TOLERANCE.

    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

    Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

    By default, 1 is specified.

    " + "documentation":"

    The maximum number of accounts in which to perform this operation at one time. This can depend on the value of FailureToleranceCount depending on your ConcurrencyMode. MaxConcurrentCount is at most one more than the FailureToleranceCount if you're using STRICT_FAILURE_TOLERANCE.

    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

    You can specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

    By default, 1 is specified.

    " }, "MaxConcurrentPercentage":{ "shape":"MaxConcurrentPercentage", - "documentation":"

    The maximum percentage of accounts in which to perform this operation at one time.

    When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result is zero. In this case, CloudFormation sets the number as one instead.

    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

    Conditional: You must specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

    By default, 1 is specified.

    " + "documentation":"

    The maximum percentage of accounts in which to perform this operation at one time.

    When calculating the number of accounts based on the specified percentage, CloudFormation rounds down to the next whole number. This is true except in cases where rounding down would result in zero. In this case, CloudFormation sets the number as one instead.

    Note that this setting lets you specify the maximum for operations. For large deployments, under certain circumstances the actual number of accounts acted upon concurrently may be lower due to service throttling.

    You can specify either MaxConcurrentCount or MaxConcurrentPercentage, but not both.

    By default, 1 is specified.

    " }, "ConcurrencyMode":{ "shape":"ConcurrencyMode", @@ -7971,11 +7941,11 @@ }, "ParentId":{ "shape":"StackId", - "documentation":"

    For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

    For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

    " + "documentation":"

    For nested stacks, the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

    For more information, see Nested stacks in the CloudFormation User Guide.

    " }, "RootId":{ "shape":"StackId", - "documentation":"

    For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

    For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

    " + "documentation":"

    For nested stacks, the stack ID of the top-level stack to which the nested stack ultimately belongs.

    For more information, see Nested stacks in the CloudFormation User Guide.

    " }, "DriftInformation":{ "shape":"StackDriftInformationSummary", @@ -7994,8 +7964,7 @@ }, "StaleRequestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Another operation has been performed on this stack set since the specified operation was performed.

    ", "error":{ "code":"StaleRequestException", @@ -8053,8 +8022,7 @@ }, "StopStackSetOperationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "SupportedMajorVersion":{ "type":"integer", @@ -8078,7 +8046,7 @@ }, "Value":{ "shape":"TagValue", - "documentation":"

    Required. A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value.

    " + "documentation":"

    Required. A string that contains the value for this tag. You can specify a maximum of 256 characters for a tag value.

    " } }, "documentation":"

    The Tag type enables you to specify a key-value pair that can be used to store information about an CloudFormation stack.

    " @@ -8291,8 +8259,7 @@ "Timestamp":{"type":"timestamp"}, "TokenAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A client request token already exists.

    ", "error":{ "code":"TokenAlreadyExistsException", @@ -8409,8 +8376,7 @@ }, "TypeConfigurationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified extension configuration can't be found.

    ", "error":{ "code":"TypeConfigurationNotFoundException", @@ -8452,8 +8418,7 @@ }, "TypeNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified extension doesn't exist in the CloudFormation registry.

    ", "error":{ "code":"TypeNotFoundException", @@ -8645,11 +8610,11 @@ }, "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

    " + "documentation":"

    Structure that contains the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.

    " }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -8657,11 +8622,11 @@ }, "StackPolicyDuringUpdateBody":{ "shape":"StackPolicyDuringUpdateBody", - "documentation":"

    Structure containing the temporary overriding stack policy body. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used.

    " + "documentation":"

    Structure that contains the temporary overriding stack policy body. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used.

    " }, "StackPolicyDuringUpdateURL":{ "shape":"StackPolicyDuringUpdateURL", - "documentation":"

    Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used.

    " + "documentation":"

    Location of a file that contains the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both.

    If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used.

    " }, "Parameters":{ "shape":"Parameters", @@ -8669,7 +8634,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

      You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

      Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

      For more information, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " + "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

      If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

      You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

      Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

      For more information, see Perform custom processing on CloudFormation templates with template macros.

    Only one of the Capabilities and ResourceType parameters can be specified.

    " }, "ResourceTypes":{ "shape":"ResourceTypes", @@ -8685,11 +8650,11 @@ }, "StackPolicyBody":{ "shape":"StackPolicyBody", - "documentation":"

    Structure containing a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + "documentation":"

    Structure that contains a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.

    " }, "StackPolicyURL":{ "shape":"StackPolicyURL", - "documentation":"

    Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.

    " + "documentation":"

    Location of a file that contains the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. URLs from S3 static websites are not supported.

    You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

    You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.

    " }, "NotificationARNs":{ "shape":"NotificationARNs", @@ -8793,7 +8758,7 @@ }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (maximum size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (maximum size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. S3 static website URLs are not supported.

    Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.

    " }, "UsePreviousTemplate":{ "shape":"UsePreviousTemplate", @@ -8805,7 +8770,7 @@ }, "Capabilities":{ "shape":"Capabilities", - "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Perform custom processing on CloudFormation templates with template macros.

      Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

    " + "documentation":"

    In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

    • CAPABILITY_IAM and CAPABILITY_NAMED_IAM

      Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

      The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

      • If you have IAM resources, you can specify either capability.

      • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

      • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

      If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

      For more information, see Acknowledging IAM resources in CloudFormation templates.

    • CAPABILITY_AUTO_EXPAND

      Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Perform custom processing on CloudFormation templates with template macros.

      Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.

    " }, "Tags":{ "shape":"Tags", @@ -8901,11 +8866,11 @@ "members":{ "TemplateBody":{ "shape":"TemplateBody", - "documentation":"

    Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + "documentation":"

    Structure that contains the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " }, "TemplateURL":{ "shape":"TemplateURL", - "documentation":"

    The URL of a file containing the template body. The URL must point to a template (max size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " + "documentation":"

    The URL of a file that contains the template body. The URL must point to a template (max size: 1 MB) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.

    Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.

    " } }, "documentation":"

    The input for ValidateTemplate action.

    " @@ -8997,7 +8962,8 @@ "enum":[ "MUTUALLY_EXCLUSIVE_PROPERTIES", "UNSUPPORTED_PROPERTIES", - "MUTUALLY_EXCLUSIVE_TYPES" + "MUTUALLY_EXCLUSIVE_TYPES", + "EXCLUDED_PROPERTIES" ] }, "Warnings":{ diff --git a/services/cloudfront/pom.xml b/services/cloudfront/pom.xml index 8084ac82d908..f39c340f3465 100644 --- a/services/cloudfront/pom.xml +++ b/services/cloudfront/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudfront AWS Java SDK :: Services :: Amazon CloudFront diff --git a/services/cloudfrontkeyvaluestore/pom.xml b/services/cloudfrontkeyvaluestore/pom.xml index 29816b6f83fd..1b1f769ed0b4 100644 --- a/services/cloudfrontkeyvaluestore/pom.xml +++ b/services/cloudfrontkeyvaluestore/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudfrontkeyvaluestore AWS Java SDK :: Services :: Cloud Front Key Value Store diff --git a/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config b/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config index fe4c05aef8db..3388694e6427 100644 --- a/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config +++ b/services/cloudfrontkeyvaluestore/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "enableGenerateCompiledEndpointRules": true, - "enableEndpointAuthSchemeParams": true, - "enableFastUnmarshaller": true + "enableEndpointAuthSchemeParams": true } diff --git a/services/cloudhsm/pom.xml b/services/cloudhsm/pom.xml index ae7f63be88ea..dbb76f898888 100644 --- a/services/cloudhsm/pom.xml +++ b/services/cloudhsm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudhsm AWS Java SDK :: Services :: AWS CloudHSM diff --git a/services/cloudhsm/src/main/resources/codegen-resources/customization.config 
b/services/cloudhsm/src/main/resources/codegen-resources/customization.config index 51c468f66ae4..6652e711eac8 100644 --- a/services/cloudhsm/src/main/resources/codegen-resources/customization.config +++ b/services/cloudhsm/src/main/resources/codegen-resources/customization.config @@ -30,6 +30,5 @@ "listHsms", "listLunaClients" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudhsmv2/pom.xml b/services/cloudhsmv2/pom.xml index a2f983f284a1..9f6eda9b9d3c 100644 --- a/services/cloudhsmv2/pom.xml +++ b/services/cloudhsmv2/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 cloudhsmv2 diff --git a/services/cloudhsmv2/src/main/resources/codegen-resources/customization.config b/services/cloudhsmv2/src/main/resources/codegen-resources/customization.config index 6e1e18f9ca3e..945a45b4b737 100644 --- a/services/cloudhsmv2/src/main/resources/codegen-resources/customization.config +++ b/services/cloudhsmv2/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "describeBackups", "describeClusters" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudsearch/pom.xml b/services/cloudsearch/pom.xml index 50b1ab9b588f..d8f88d7866dc 100644 --- a/services/cloudsearch/pom.xml +++ b/services/cloudsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudsearch AWS Java SDK :: Services :: Amazon CloudSearch diff --git a/services/cloudsearchdomain/pom.xml b/services/cloudsearchdomain/pom.xml index ec31db6f14fb..4b347da1a932 100644 --- a/services/cloudsearchdomain/pom.xml +++ b/services/cloudsearchdomain/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudsearchdomain AWS Java SDK :: Services :: Amazon CloudSearch 
Domain diff --git a/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config b/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config index 08ea308d2c9a..1636188d7d29 100644 --- a/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config +++ b/services/cloudsearchdomain/src/main/resources/codegen-resources/customization.config @@ -24,6 +24,5 @@ "interceptors": [ "software.amazon.awssdk.services.cloudsearchdomain.internal.SwitchToPostInterceptor" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudtrail/pom.xml b/services/cloudtrail/pom.xml index ef1b38c0effb..9af1b838d888 100644 --- a/services/cloudtrail/pom.xml +++ b/services/cloudtrail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudtrail AWS Java SDK :: Services :: AWS CloudTrail diff --git a/services/cloudtrail/src/main/resources/codegen-resources/customization.config b/services/cloudtrail/src/main/resources/codegen-resources/customization.config index ae5a2dab663f..2608a329c80e 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/customization.config +++ b/services/cloudtrail/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "listPublicKeys", "lookupEvents" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json index bf317e00ad41..800bdb449789 100644 --- a/services/cloudtrail/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudtrail/src/main/resources/codegen-resources/service-2.json @@ -459,6 +459,29 @@ "documentation":"

    Returns the specified dashboard.

    ", "idempotent":true }, + "GetEventConfiguration":{ + "name":"GetEventConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEventConfigurationRequest"}, + "output":{"shape":"GetEventConfigurationResponse"}, + "errors":[ + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InvalidParameterCombinationException"} + ], + "documentation":"

    Retrieves the current event configuration settings for the specified event data store, including details about maximum event size and context key selectors configured for the event data store.

    ", + "idempotent":true + }, "GetEventDataStore":{ "name":"GetEventDataStore", "http":{ @@ -814,6 +837,35 @@ "documentation":"

    Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a Region within the last 90 days.

    LookupEvents returns recent Insights events for trails that enable Insights. To view Insights events for an event data store, you can run queries on your Insights event data store, and you can also view the Lake dashboard for Insights.

    Lookup supports the following attributes for management events:

    • Amazon Web Services access key

    • Event ID

    • Event name

    • Event source

    • Read only

    • Resource name

    • Resource type

    • User name

    Lookup supports the following attributes for Insights events:

    • Event ID

    • Event name

    • Event source

    All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

    The rate of lookup requests is limited to two per second, per account, per Region. If this limit is exceeded, a throttling error occurs.

    ", "idempotent":true }, + "PutEventConfiguration":{ + "name":"PutEventConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutEventConfigurationRequest"}, + "output":{"shape":"PutEventConfigurationResponse"}, + "errors":[ + {"shape":"EventDataStoreARNInvalidException"}, + {"shape":"EventDataStoreNotFoundException"}, + {"shape":"InvalidEventDataStoreStatusException"}, + {"shape":"InvalidEventDataStoreCategoryException"}, + {"shape":"InactiveEventDataStoreException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidParameterCombinationException"}, + {"shape":"CloudTrailARNInvalidException"}, + {"shape":"ConflictException"}, + {"shape":"NotOrganizationMasterAccountException"}, + {"shape":"NoManagementAccountSLRExistsException"}, + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"InsufficientIAMAccessPermissionException"} + ], + "documentation":"

    Updates the event configuration settings for the specified event data store. You can update the maximum event size and context key selectors.

    ", + "idempotent":true + }, "PutEventSelectors":{ "name":"PutEventSelectors", "http":{ @@ -909,7 +961,8 @@ {"shape":"OrganizationNotInAllFeaturesModeException"}, {"shape":"OrganizationsNotInUseException"}, {"shape":"UnsupportedOperationException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"InsufficientIAMAccessPermissionException"} ], "documentation":"

    Registers an organization’s member account as the CloudTrail delegated administrator.

    ", "idempotent":true @@ -1019,7 +1072,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"}, - {"shape":"InsufficientDependencyServiceAccessPermissionException"} + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"ConflictException"} ], "documentation":"

    Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

    " }, @@ -1118,7 +1172,8 @@ {"shape":"UnsupportedOperationException"}, {"shape":"NotOrganizationMasterAccountException"}, {"shape":"NoManagementAccountSLRExistsException"}, - {"shape":"InsufficientDependencyServiceAccessPermissionException"} + {"shape":"InsufficientDependencyServiceAccessPermissionException"}, + {"shape":"ConflictException"} ], "documentation":"

    Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

    " }, @@ -1289,15 +1344,13 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You do not have sufficient access to perform this action.

    ", "exception":true }, "AccountHasOngoingImportException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when you start a new import and a previous import is still in progress.

    ", "exception":true }, @@ -1309,22 +1362,19 @@ }, "AccountNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified account is not found or not part of an organization.

    ", "exception":true }, "AccountNotRegisteredException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified account is not registered as the CloudTrail delegated administrator.

    ", "exception":true }, "AccountRegisteredException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the account is already registered as the CloudTrail delegated administrator.

    ", "exception":true }, @@ -1348,8 +1398,7 @@ }, "AddTagsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data if successful. Otherwise, returns an error.

    " }, "AdvancedEventSelector":{ @@ -1463,8 +1512,7 @@ }, "CannotDelegateManagementAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the management account of an organization is registered as the CloudTrail delegated administrator.

    ", "exception":true }, @@ -1484,15 +1532,13 @@ }, "ChannelARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified value of ChannelARN is not valid.

    ", "exception":true }, "ChannelAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided channel already exists.

    ", "exception":true }, @@ -1504,15 +1550,13 @@ }, "ChannelExistsForEDSException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified event data store cannot yet be deleted because it is in use by a channel.

    ", "exception":true }, "ChannelMaxLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of channels limit is exceeded.

    ", "exception":true }, @@ -1524,8 +1568,7 @@ }, "ChannelNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when CloudTrail cannot find the specified channel.

    ", "exception":true }, @@ -1535,46 +1578,63 @@ }, "CloudTrailARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when an operation is called with an ARN that is not valid.

    The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

    The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

    The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash

    The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

    ", "exception":true }, "CloudTrailAccessNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.

    ", "exception":true }, "CloudTrailInvalidClientTokenIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when a call results in the InvalidClientTokenId error code. This can occur when you are creating or updating a trail to send notifications to an Amazon SNS topic that is in a suspended Amazon Web Services account.

    ", "exception":true }, "CloudWatchLogsDeliveryUnavailableException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Cannot set a CloudWatch Logs delivery for this Region.

    ", "exception":true }, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You are trying to update a resource when another request is in progress. Allow sufficient wait time for the previous request to complete, then retry your request.

    ", "exception":true }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource is not ready for an operation. This can occur when you try to run an operation on a resource before CloudTrail has time to fully load the resource, or because another operation is modifying the resource. If this exception occurs, wait a few minutes, and then try the operation again.

    ", "exception":true }, + "ContextKeySelector":{ + "type":"structure", + "required":[ + "Type", + "Equals" + ], + "members":{ + "Type":{ + "shape":"Type", + "documentation":"

    Specifies the type of the event record field in ContextKeySelector. Valid values include RequestContext, TagContext.

    " + }, + "Equals":{ + "shape":"OperatorTargetList", + "documentation":"

    A list of keys defined by Type to be included in CloudTrail enriched events.

    " + } + }, + "documentation":"

    An object that contains information types to be included in CloudTrail enriched events.

    " + }, + "ContextKeySelectors":{ + "type":"list", + "member":{"shape":"ContextKeySelector"}, + "max":2 + }, "CreateChannelRequest":{ "type":"structure", "required":[ @@ -1954,8 +2014,7 @@ "Date":{"type":"timestamp"}, "DelegatedAdminAccountLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of CloudTrail delegated administrators is reached.

    ", "exception":true }, @@ -1971,8 +2030,7 @@ }, "DeleteChannelResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDashboardRequest":{ "type":"structure", @@ -1986,8 +2044,7 @@ }, "DeleteDashboardResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventDataStoreRequest":{ "type":"structure", @@ -2001,8 +2058,7 @@ }, "DeleteEventDataStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResourcePolicyRequest":{ "type":"structure", @@ -2016,8 +2072,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTrailRequest":{ "type":"structure", @@ -2032,8 +2087,7 @@ }, "DeleteTrailResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "DeliveryS3Uri":{ @@ -2068,8 +2122,7 @@ }, "DeregisterOrganizationDelegatedAdminResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the following response if successful. Otherwise, returns an error.

    " }, "DescribeQueryRequest":{ @@ -2374,15 +2427,13 @@ }, "EventDataStoreARNInvalidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified event data store ARN is not valid or does not map to an event data store in your account.

    ", "exception":true }, "EventDataStoreAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An event data store with that name already exists.

    ", "exception":true }, @@ -2394,15 +2445,13 @@ }, "EventDataStoreFederationEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You cannot delete the event data store because Lake query federation is enabled. To delete the event data store, run the DisableFederation operation to disable Lake query federation on the event data store.

    ", "exception":true }, "EventDataStoreHasOngoingImportException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when you try to update or delete an event data store that currently has an import in progress.

    ", "exception":true }, @@ -2420,8 +2469,7 @@ }, "EventDataStoreMaxLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Your account has used the maximum number of event data stores.

    ", "exception":true }, @@ -2433,8 +2481,7 @@ }, "EventDataStoreNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified event data store was not found.

    ", "exception":true }, @@ -2451,8 +2498,7 @@ }, "EventDataStoreTerminationProtectedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store cannot be deleted because termination protection is enabled for it.

    ", "exception":true }, @@ -2555,8 +2601,7 @@ }, "GenerateResponseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when a valid query could not be generated for the provided prompt.

    ", "exception":true }, @@ -2654,6 +2699,32 @@ } } }, + "GetEventConfigurationRequest":{ + "type":"structure", + "members":{ + "EventDataStore":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which you want to retrieve event configuration settings.

    " + } + } + }, + "GetEventConfigurationResponse":{ + "type":"structure", + "members":{ + "EventDataStoreArn":{ + "shape":"EventDataStoreArn", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which the event configuration settings are returned.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events stored in the specified event data store.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    The list of context key selectors that are configured for the event data store.

    " + } + } + }, "GetEventDataStoreRequest":{ "type":"structure", "required":["EventDataStore"], @@ -3068,8 +3139,7 @@ }, "ImportNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified import was not found.

    ", "exception":true }, @@ -3152,15 +3222,13 @@ }, "InactiveEventDataStoreException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store is inactive.

    ", "exception":true }, "InactiveQueryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified query cannot be canceled because it is in the FINISHED, FAILED, TIMED_OUT, or CANCELLED state.

    ", "exception":true }, @@ -3192,8 +3260,7 @@ }, "InsightNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    If you run GetInsightSelectors on a trail or event data store that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

    ", "exception":true }, @@ -3246,234 +3313,207 @@ }, "InsufficientDependencyServiceAccessPermissionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the IAM identity that is used to create the organization resource lacks one or more required permissions for creating an organization resource in a required service.

    ", "exception":true }, "InsufficientEncryptionPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For the CreateTrail, PutInsightSelectors, UpdateTrail, StartQuery, and StartImport operations, this exception is thrown when the policy on the S3 bucket or KMS key does not have sufficient permissions for the operation.

    For all other operations, this exception is thrown when the policy for the KMS key does not have sufficient permissions for the operation.

    ", "exception":true }, + "InsufficientIAMAccessPermissionException":{ + "type":"structure", + "members":{}, + "documentation":"

    The task can't be completed because you are signed in with an account that lacks permissions to view or create a service-linked role. Sign in with an account that has the required permissions and then try again.

    ", + "exception":true + }, "InsufficientS3BucketPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the policy on the S3 bucket is not sufficient.

    ", "exception":true }, "InsufficientSnsTopicPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the policy on the Amazon SNS topic is not sufficient.

    ", "exception":true }, "Integer":{"type":"integer"}, "InvalidCloudWatchLogsLogGroupArnException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided CloudWatch Logs log group is not valid.

    ", "exception":true }, "InvalidCloudWatchLogsRoleArnException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided role is not valid.

    ", "exception":true }, "InvalidDateRangeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A date range for the query was specified that is not valid. Be sure that the start time is chronologically before the end time. For more information about writing a query, see Create or edit a query in the CloudTrail User Guide.

    ", "exception":true }, "InvalidEventCategoryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs if an event category that is not valid is specified as a value of EventCategory.

    ", "exception":true }, "InvalidEventDataStoreCategoryException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when event categories of specified event data stores are not valid.

    ", "exception":true }, "InvalidEventDataStoreStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event data store is not in a status that supports the operation.

    ", "exception":true }, "InvalidEventSelectorsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the PutEventSelectors operation is called with a number of event selectors, advanced event selectors, or data resources that is not valid. The combination of event selectors or advanced event selectors and data resources is not valid. A trail can have up to 5 event selectors. If a trail uses advanced event selectors, a maximum of 500 total values for all conditions in all advanced event selectors is allowed. A trail is limited to 250 data resources. These data resources can be distributed across event selectors, but the overall total cannot exceed 250.

    You can:

    • Specify a valid number of event selectors (1 to 5) for a trail.

    • Specify a valid number of data resources (1 to 250) for an event selector. The limit of number of resources on an individual event selector is configurable up to 250. However, this upper limit is allowed only if the total number of data resources does not exceed 250 across all event selectors for a trail.

    • Specify up to 500 values for all conditions in all advanced event selectors for a trail.

    • Specify a valid value for a parameter. For example, specifying the ReadWriteType parameter with a value of read-only is not valid.

    ", "exception":true }, "InvalidHomeRegionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when an operation is called on a trail from a Region other than the Region in which the trail was created.

    ", "exception":true }, "InvalidImportSourceException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided source S3 bucket is not valid for import.

    ", "exception":true }, "InvalidInsightSelectorsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For PutInsightSelectors, this exception is thrown when the formatting or syntax of the InsightSelectors JSON statement is not valid, or the specified InsightType in the InsightSelectors statement is not valid. Valid values for InsightType are ApiCallRateInsight and ApiErrorRateInsight. To enable Insights on an event data store, the destination event data store specified by the InsightsDestination parameter must log Insights events and the source event data store specified by the EventDataStore parameter must log management events.

    For UpdateEventDataStore, this exception is thrown if Insights are enabled on the event data store and the updated advanced event selectors are not compatible with the configured InsightSelectors. If the InsightSelectors includes an InsightType of ApiCallRateInsight, the source event data store must log write management events. If the InsightSelectors includes an InsightType of ApiErrorRateInsight, the source event data store must log management events.

    ", "exception":true }, "InvalidKmsKeyIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the KMS key ARN is not valid.

    ", "exception":true }, "InvalidLookupAttributesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs when a lookup attribute is specified that is not valid.

    ", "exception":true }, "InvalidMaxResultsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown if the limit specified is not valid.

    ", "exception":true }, "InvalidNextTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A token that is not valid, or a token that was previously used in a request with different parameters. This exception is thrown if the token is not valid.

    ", "exception":true }, "InvalidParameterCombinationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the combination of parameters provided is not valid.

    ", "exception":true }, "InvalidParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The request includes a parameter that is not valid.

    ", "exception":true }, "InvalidQueryStatementException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query that was submitted has validation errors, or uses incorrect syntax or unsupported keywords. For more information about writing a query, see Create or edit a query in the CloudTrail User Guide.

    ", "exception":true }, "InvalidQueryStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query status is not valid for the operation.

    ", "exception":true }, "InvalidS3BucketNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided S3 bucket name is not valid.

    ", "exception":true }, "InvalidS3PrefixException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided S3 prefix is not valid.

    ", "exception":true }, "InvalidSnsTopicNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided SNS topic name is not valid.

    ", "exception":true }, "InvalidSourceException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified value of Source is not valid.

    ", "exception":true }, "InvalidTagParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified tag key or values are not valid. It can also occur if there are duplicate tags or too many tags on the resource.

    ", "exception":true }, "InvalidTimeRangeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Occurs if the timestamp values are not valid. Either the start time occurs after the end time, or the time range is outside the range of possible values.

    ", "exception":true }, "InvalidTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Reserved for future use.

    ", "exception":true }, "InvalidTrailNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided trail name is not valid. Trail names must meet the following requirements:

    • Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    • Start with a letter or number, and end with a letter or number

    • Be between 3 and 128 characters

    • Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid.

    • Not be in IP address format (for example, 192.168.5.4)

    ", "exception":true }, "KmsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when there is an issue with the specified KMS key and the trail or event data store can't be updated.

    ", "exception":true }, "KmsKeyDisabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is no longer in use.

    ", "deprecated":true, "exception":true }, "KmsKeyNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the KMS key does not exist, when the S3 bucket and the KMS key are not in the same Region, or when the KMS key associated with the Amazon SNS topic either does not exist or is not in the same Region.

    ", "exception":true }, @@ -3871,7 +3911,7 @@ "type":"string", "max":1024, "min":3, - "pattern":"^[a-zA-Z0-9._/\\-:]+$" + "pattern":"^[a-zA-Z0-9._/\\-:*]+$" }, "Long":{"type":"long"}, "LookupAttribute":{ @@ -3960,11 +4000,17 @@ }, "MaxConcurrentQueriesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You are already running the maximum number of concurrent queries. The maximum number of concurrent queries is 10. Wait a minute for some queries to finish, and then run the query again.

    ", "exception":true }, + "MaxEventSize":{ + "type":"string", + "enum":[ + "Standard", + "Large" + ] + }, "MaxQueryResults":{ "type":"integer", "max":1000, @@ -3977,37 +4023,32 @@ }, "MaximumNumberOfTrailsExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the maximum number of trails is reached.

    ", "exception":true }, "NextToken":{"type":"string"}, "NoManagementAccountSLRExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the management account does not have a service-linked role.

    ", "exception":true }, "NotOrganizationManagementAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the account making the request is not the organization's management account.

    ", "exception":true }, "NotOrganizationMasterAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.

    ", "exception":true }, "OperationNotPermittedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the requested operation is not permitted.

    ", "exception":true }, @@ -4016,6 +4057,17 @@ "member":{"shape":"OperatorValue"}, "min":1 }, + "OperatorTargetList":{ + "type":"list", + "member":{"shape":"OperatorTargetListMember"}, + "max":50, + "min":1 + }, + "OperatorTargetListMember":{ + "type":"string", + "max":128, + "min":1 + }, "OperatorValue":{ "type":"string", "max":2048, @@ -4024,15 +4076,13 @@ }, "OrganizationNotInAllFeaturesModeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when Organizations is not configured to support all features. All features must be enabled in Organizations to support creating an organization trail or event data store.

    ", "exception":true }, "OrganizationsNotInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the request is made from an Amazon Web Services account that is not a member of an organization. To make this request, sign in using the credentials of an account that belongs to an organization.

    ", "exception":true }, @@ -4109,6 +4159,44 @@ "type":"list", "member":{"shape":"PublicKey"} }, + "PutEventConfigurationRequest":{ + "type":"structure", + "required":[ + "MaxEventSize", + "ContextKeySelectors" + ], + "members":{ + "EventDataStore":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which you want to update event configuration settings.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events to be stored in the specified event data store. If you are using context key selectors, MaxEventSize must be set to Large.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    A list of context key selectors that will be included to provide enriched event data.

    " + } + } + }, + "PutEventConfigurationResponse":{ + "type":"structure", + "members":{ + "EventDataStoreArn":{ + "shape":"EventDataStoreArn", + "documentation":"

    The Amazon Resource Name (ARN) or ID suffix of the ARN of the event data store for which the event configuration settings were updated.

    " + }, + "MaxEventSize":{ + "shape":"MaxEventSize", + "documentation":"

    The maximum allowed size for events stored in the specified event data store.

    " + }, + "ContextKeySelectors":{ + "shape":"ContextKeySelectors", + "documentation":"

    The list of context key selectors that are configured for the event data store.

    " + } + } + }, "PutEventSelectorsRequest":{ "type":"structure", "required":["TrailName"], @@ -4251,8 +4339,7 @@ }, "QueryIdNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The query ID does not exist or does not map to a query.

    ", "exception":true }, @@ -4435,8 +4522,7 @@ }, "RegisterOrganizationDelegatedAdminResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the following response if successful. Otherwise, returns an error.

    " }, "RemoveTagsRequest":{ @@ -4459,8 +4545,7 @@ }, "RemoveTagsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "RequestWidget":{ @@ -4505,8 +4590,7 @@ }, "ResourceARNNotValidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the provided resource does not exist, or the ARN format of the resource is not valid.

    The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE

    The following is the format of a dashboard ARN: arn:aws:cloudtrail:us-east-1:123456789012:dashboard/exampleDash

    The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890

    ", "exception":true }, @@ -4527,8 +4611,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource is not found.

    ", "exception":true }, @@ -4539,15 +4622,13 @@ }, "ResourcePolicyNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource policy is not found.

    ", "exception":true }, "ResourcePolicyNotValidException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the resource-based policy has syntax errors, or contains a principal that is not valid.

    ", "exception":true }, @@ -4571,8 +4652,7 @@ }, "ResourceTypeNotSupportedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified resource type is not supported by CloudTrail.

    ", "exception":true }, @@ -4646,8 +4726,7 @@ }, "S3BucketDoesNotExistException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified S3 bucket does not exist.

    ", "exception":true }, @@ -4760,8 +4839,7 @@ }, "ServiceQuotaExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the quota is exceeded. For information about CloudTrail quotas, see Service quotas in the Amazon Web Services General Reference.

    ", "exception":true }, @@ -4820,8 +4898,7 @@ }, "StartEventDataStoreIngestionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StartImportRequest":{ "type":"structure", @@ -4898,8 +4975,7 @@ }, "StartLoggingResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "StartQueryRequest":{ @@ -4952,8 +5028,7 @@ }, "StopEventDataStoreIngestionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StopImportRequest":{ "type":"structure", @@ -5019,8 +5094,7 @@ }, "StopLoggingResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Returns the objects or data listed below if successful. Otherwise, returns an error.

    " }, "String":{"type":"string"}, @@ -5051,8 +5125,7 @@ }, "TagsLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The number of tags per trail, event data store, dashboard, or channel has exceeded the permitted amount. Currently, the limit is 50.

    ", "exception":true }, @@ -5065,8 +5138,7 @@ "TerminationProtectionEnabled":{"type":"boolean"}, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the request rate exceeds the limit.

    ", "exception":true }, @@ -5151,8 +5223,7 @@ }, "TrailAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the specified trail already exists.

    ", "exception":true }, @@ -5184,15 +5255,13 @@ }, "TrailNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the trail with the given name is not found.

    ", "exception":true }, "TrailNotProvidedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is no longer in use.

    ", "exception":true }, @@ -5200,6 +5269,13 @@ "type":"list", "member":{"shape":"TrailInfo"} }, + "Type":{ + "type":"string", + "enum":[ + "TagContext", + "RequestContext" + ] + }, "UUID":{ "type":"string", "max":36, @@ -5208,8 +5284,7 @@ }, "UnsupportedOperationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception is thrown when the requested operation is not supported.

    ", "exception":true }, diff --git a/services/cloudtraildata/pom.xml b/services/cloudtraildata/pom.xml index 9b3817bb9e91..0e20f30729ea 100644 --- a/services/cloudtraildata/pom.xml +++ b/services/cloudtraildata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudtraildata AWS Java SDK :: Services :: Cloud Trail Data diff --git a/services/cloudtraildata/src/main/resources/codegen-resources/customization.config b/services/cloudtraildata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/cloudtraildata/src/main/resources/codegen-resources/customization.config +++ b/services/cloudtraildata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudwatch/pom.xml b/services/cloudwatch/pom.xml index 77fa0539e8dd..cbb47f1874b4 100644 --- a/services/cloudwatch/pom.xml +++ b/services/cloudwatch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudwatch AWS Java SDK :: Services :: Amazon CloudWatch diff --git a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json index baf31c5e0f0c..e4d10c769503 100644 --- a/services/cloudwatch/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatch/src/main/resources/codegen-resources/service-2.json @@ -24,7 +24,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

    Deletes the specified alarms. You can delete up to 100 alarms in one operation. However, this total can include no more than one composite alarm. For example, you could delete 99 metric alarms and one composite alarms with one operation, but you can't delete two composite alarms with one operation.

    If you specify an incorrect alarm name or make any other error in the operation, no alarms are deleted. To confirm that alarms were deleted successfully, you can use the DescribeAlarms operation after using DeleteAlarms.

    It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

    To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to false.

    Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

    " + "documentation":"

    Deletes the specified alarms. You can delete up to 100 alarms in one operation. However, this total can include no more than one composite alarm. For example, you could delete 99 metric alarms and one composite alarm with one operation, but you can't delete two composite alarms with one operation.

    If you specify any incorrect alarm names, the alarms you specify with correct names are still deleted. Other syntax errors might result in no alarms being deleted. To confirm that alarms were deleted successfully, you can use the DescribeAlarms operation after using DeleteAlarms.

    It is possible to create a loop or cycle of composite alarms, where composite alarm A depends on composite alarm B, and composite alarm B also depends on composite alarm A. In this scenario, you can't delete any composite alarm that is part of the cycle because there is always still a composite alarm that depends on that alarm that you want to delete.

    To get out of such a situation, you must break the cycle by changing the rule of one of the composite alarms in the cycle to remove a dependency that creates the cycle. The simplest change to make to break a cycle is to change the AlarmRule of one of the alarms to false.

    Additionally, the evaluation of composite alarms stops if CloudWatch detects a cycle in the evaluation path.

    " }, "DeleteAnomalyDetector":{ "name":"DeleteAnomalyDetector", @@ -60,7 +60,8 @@ "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"DashboardNotFoundError"}, - {"shape":"InternalServiceFault"} + {"shape":"InternalServiceFault"}, + {"shape":"ConflictException"} ], "documentation":"

    Deletes all dashboards that you specify. You can specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

    " }, @@ -470,7 +471,8 @@ }, "errors":[ {"shape":"DashboardInvalidInputError"}, - {"shape":"InternalServiceFault"} + {"shape":"InternalServiceFault"}, + {"shape":"ConflictException"} ], "documentation":"

    Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

    All dashboards in your account are global, not region-specific.

    A simple way to create a dashboard using PutDashboard is to copy an existing dashboard. To copy an existing dashboard using the console, you can load the dashboard and then use the View/edit source command in the Actions menu to display the JSON block for that dashboard. Another way to copy a dashboard is to use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard.

    When you create a dashboard with PutDashboard, a good practice is to add a text widget at the top of the dashboard with a message that the dashboard was created by script and should not be changed in the console. This message could also point console users to the location of the DashboardBody script or the CloudFormation template used to create the dashboard.

    " }, @@ -621,7 +623,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"InternalServiceFault"} + {"shape":"InternalServiceFault"}, + {"shape":"ConflictException"} ], "documentation":"

    Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. Currently, the only CloudWatch resources that can be tagged are alarms and Contributor Insights rules.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

    Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

    You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

    You can associate as many as 50 tags with a CloudWatch resource.

    " }, @@ -640,7 +643,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"}, - {"shape":"InternalServiceFault"} + {"shape":"InternalServiceFault"}, + {"shape":"ConflictException"} ], "documentation":"

    Removes one or more tags from the specified resource.

    " } @@ -978,8 +982,7 @@ }, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    More than one process tried to modify a resource at the same time.

    ", "error":{ "code":"ConcurrentModificationException", @@ -988,6 +991,14 @@ }, "exception":true }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    This operation attempted to create a resource that already exists.

    ", + "exception":true + }, "Counts":{ "type":"list", "member":{"shape":"DatapointValue"} @@ -1188,8 +1199,7 @@ }, "DeleteAnomalyDetectorOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDashboardsInput":{ "type":"structure", @@ -1203,8 +1213,7 @@ }, "DeleteDashboardsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteInsightRulesInput":{ "type":"structure", @@ -1237,8 +1246,7 @@ }, "DeleteMetricStreamOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAlarmHistoryInput":{ "type":"structure", @@ -1782,7 +1790,7 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

    The time stamp indicating the earliest data to be returned.

    The value specified is inclusive; results include data points with the specified time stamp.

    CloudWatch rounds the specified time stamp as follows:

    • Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.

    • Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.

    • Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.

    If you set Period to 5, 10, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

    For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

    " + "documentation":"

    The time stamp indicating the earliest data to be returned.

    The value specified is inclusive; results include data points with the specified time stamp.

    CloudWatch rounds the specified time stamp as follows:

    • Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.

    • Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.

    • Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.

    If you set Period to 5, 10, 20, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, 20-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

    For better performance, specify StartTime and EndTime values that align with the value of the metric's Period and sync up with the beginning and end of an hour. For example, if the Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime.

    " }, "EndTime":{ "shape":"Timestamp", @@ -1849,7 +1857,7 @@ }, "StartTime":{ "shape":"Timestamp", - "documentation":"

    The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.

    The value specified is inclusive; results include data points with the specified time stamp. In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

    CloudWatch rounds the specified time stamp as follows:

    • Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.

    • Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.

    • Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.

    If you set Period to 5, 10, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

    " + "documentation":"

    The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.

    The value specified is inclusive; results include data points with the specified time stamp. In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

    CloudWatch rounds the specified time stamp as follows:

    • Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.

    • Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.

    • Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.

    If you set Period to 5, 10, 20, or 30, the start time of your request is rounded down to the nearest time that corresponds to even 5-, 10-, 20-, or 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15.

    " }, "EndTime":{ "shape":"Timestamp", @@ -1857,7 +1865,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

    If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:

    • Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

    • Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

    • Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

    " + "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 20, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

    If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:

    • Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

    • Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

    • Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

    " }, "Statistics":{ "shape":"Statistics", @@ -2020,6 +2028,10 @@ "ManagedRule":{ "shape":"InsightRuleIsManaged", "documentation":"

    An optional built-in rule that Amazon Web Services manages.

    " + }, + "ApplyOnTransformedLogs":{ + "shape":"InsightRuleOnTransformedLogs", + "documentation":"

    Displays whether the rule is evaluated on the transformed versions of logs, for log groups that have Log transformation enabled. If this is false, log events are evaluated before they are transformed.

    " } }, "documentation":"

    This structure contains the definition for a Contributor Insights rule. For more information about this rule, see Using Contributor Insights to analyze high-cardinality data in the Amazon CloudWatch User Guide.

    " @@ -2159,6 +2171,7 @@ "type":"list", "member":{"shape":"InsightRuleName"} }, + "InsightRuleOnTransformedLogs":{"type":"boolean"}, "InsightRuleOrderBy":{ "type":"string", "max":32, @@ -2274,8 +2287,7 @@ "LastModified":{"type":"timestamp"}, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The operation exceeded one or more limits.

    ", "error":{ "code":"LimitExceededException", @@ -2401,7 +2413,7 @@ }, "Dimensions":{ "shape":"DimensionFilters", - "documentation":"

    The dimensions to filter against. Only the dimensions that match exactly will be returned.

    " + "documentation":"

    The dimensions to filter against. Only the dimensions with names that match exactly will be returned. If you specify one dimension name and a metric has that dimension and also other dimensions, it will be returned.

    " }, "NextToken":{ "shape":"NextToken", @@ -2778,7 +2790,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData operation that includes a StorageResolution of 1 second.

    " + "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 20, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData operation that includes a StorageResolution of 1 second.

    " }, "AccountId":{ "shape":"AccountId", @@ -2908,7 +2920,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

    If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:

    • Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

    • Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

    • Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

    " + "documentation":"

    The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 20, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

    If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:

    • Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

    • Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

    • Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

    " }, "Stat":{ "shape":"Stat", @@ -3159,8 +3171,7 @@ }, "PutAnomalyDetectorOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "PutCompositeAlarmInput":{ "type":"structure", @@ -3175,7 +3186,7 @@ }, "AlarmActions":{ "shape":"ResourceList", - "documentation":"

    The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    Valid Values: ]

    Amazon SNS actions:

    arn:aws:sns:region:account-id:sns-topic-name

    Lambda actions:

    • Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name

    • Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number

    • Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name

    Systems Manager actions:

    arn:aws:ssm:region:account-id:opsitem:severity

    Start a Amazon Q Developer operational investigation

    arn:aws:aiops:region:account-id:investigation-group:ingestigation-group-id

    " + "documentation":"

    The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN).

    Valid Values: ]

    Amazon SNS actions:

    arn:aws:sns:region:account-id:sns-topic-name

    Lambda actions:

    • Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name

    • Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number

    • Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name

    Systems Manager actions:

    arn:aws:ssm:region:account-id:opsitem:severity

    Start an Amazon Q Developer operational investigation

    arn:aws:aiops:region:account-id:investigation-group:investigation-group-id

    " }, "AlarmDescription":{ "shape":"AlarmDescription", @@ -3263,13 +3274,16 @@ "Tags":{ "shape":"TagList", "documentation":"

    A list of key-value pairs to associate with the Contributor Insights rule. You can associate as many as 50 tags with a rule.

    Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

    To be able to associate tags with a rule, you must have the cloudwatch:TagResource permission in addition to the cloudwatch:PutInsightRule permission.

    If you are using this operation to update an existing Contributor Insights rule, any tags you specify in this parameter are ignored. To change the tags of an existing rule, use TagResource.

    " + }, + "ApplyOnTransformedLogs":{ + "shape":"InsightRuleOnTransformedLogs", + "documentation":"

    Specify true to have this rule evaluate log events after they have been transformed by Log transformation. If you specify true, then the log events in log groups that have transformers will be evaluated by Contributor Insights after being transformed. Log groups that don't have transformers will still have their original log events evaluated by Contributor Insights.

    The default is false

    If a log group has a transformer, and transformation fails for some log events, those log events won't be evaluated by Contributor Insights. For information about investigating log transformation failures, see Transformation metrics and errors.

    " } } }, "PutInsightRuleOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "PutManagedInsightRulesInput":{ "type":"structure", @@ -3316,7 +3330,7 @@ }, "AlarmActions":{ "shape":"ResourceList", - "documentation":"

    The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid values:

    EC2 actions:

    • arn:aws:automate:region:ec2:stop

    • arn:aws:automate:region:ec2:terminate

    • arn:aws:automate:region:ec2:reboot

    • arn:aws:automate:region:ec2:recover

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0

    Autoscaling action:

    • arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

    Lambda actions:

    • Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name

    • Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number

    • Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name

    SNS notification action:

    • arn:aws:sns:region:account-id:sns-topic-name

    SSM integration actions:

    • arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name

    • arn:aws:ssm-incidents::account-id:responseplan/response-plan-name

    Start a Amazon Q Developer operational investigation

    arn:aws:aiops:region:account-id:investigation-group:ingestigation-group-id

    " + "documentation":"

    The actions to execute when this alarm transitions to the ALARM state from any other state. Each action is specified as an Amazon Resource Name (ARN). Valid values:

    EC2 actions:

    • arn:aws:automate:region:ec2:stop

    • arn:aws:automate:region:ec2:terminate

    • arn:aws:automate:region:ec2:reboot

    • arn:aws:automate:region:ec2:recover

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Stop/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Terminate/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Reboot/1.0

    • arn:aws:swf:region:account-id:action/actions/AWS_EC2.InstanceId.Recover/1.0

    Autoscaling action:

    • arn:aws:autoscaling:region:account-id:scalingPolicy:policy-id:autoScalingGroupName/group-friendly-name:policyName/policy-friendly-name

    Lambda actions:

    • Invoke the latest version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name

    • Invoke a specific version of a Lambda function: arn:aws:lambda:region:account-id:function:function-name:version-number

    • Invoke a function by using an alias Lambda function: arn:aws:lambda:region:account-id:function:function-name:alias-name

    SNS notification action:

    • arn:aws:sns:region:account-id:sns-topic-name

    SSM integration actions:

    • arn:aws:ssm:region:account-id:opsitem:severity#CATEGORY=category-name

    • arn:aws:ssm-incidents::account-id:responseplan/response-plan-name

    Start an Amazon Q Developer operational investigation

    arn:aws:aiops:region:account-id:investigation-group:investigation-group-id

    " }, "InsufficientDataActions":{ "shape":"ResourceList", @@ -3344,7 +3358,7 @@ }, "Period":{ "shape":"Period", - "documentation":"

    The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 30, and any multiple of 60.

    Period is required for alarms based on static thresholds. If you are creating an alarm based on a metric math expression, you specify the period for each metric within the objects in the Metrics array.

    Be sure to specify 10 or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10 or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm might often lapse into INSUFFICENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

    An alarm's total current evaluation period can be no longer than one day, so Period multiplied by EvaluationPeriods cannot be more than 86,400 seconds.

    " + "documentation":"

    The length, in seconds, used each time the metric specified in MetricName is evaluated. Valid values are 10, 20, 30, and any multiple of 60.

    Period is required for alarms based on static thresholds. If you are creating an alarm based on a metric math expression, you specify the period for each metric within the objects in the Metrics array.

    Be sure to specify 10, 20, or 30 only for metrics that are stored by a PutMetricData call with a StorageResolution of 1. If you specify a period of 10, 20, or 30 for a metric that does not have sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case, it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm might often lapse into INSUFFICIENT_DATA status. Specifying 10, 20, or 30 also sets this alarm as a high-resolution alarm, which has a higher charge than other alarms. For more information about pricing, see Amazon CloudWatch Pricing.

    An alarm's total current evaluation period can be no longer than seven days, so Period multiplied by EvaluationPeriods can't be more than 604,800 seconds. For alarms with a period of less than one hour (3,600 seconds), the total evaluation period can't be longer than one day (86,400 seconds).

    " }, "Unit":{ "shape":"StandardUnit", @@ -3352,7 +3366,7 @@ }, "EvaluationPeriods":{ "shape":"EvaluationPeriods", - "documentation":"

    The number of periods over which data is compared to the specified threshold. If you are setting an alarm that requires that a number of consecutive data points be breaching to trigger the alarm, this value specifies that number. If you are setting an \"M out of N\" alarm, this value is the N.

    An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period cannot be more than 86,400 seconds.

    " + "documentation":"

    The number of periods over which data is compared to the specified threshold. If you are setting an alarm that requires that a number of consecutive data points be breaching to trigger the alarm, this value specifies that number. If you are setting an \"M out of N\" alarm, this value is the N.

    " }, "DatapointsToAlarm":{ "shape":"DatapointsToAlarm", @@ -3406,7 +3420,7 @@ }, "StrictEntityValidation":{ "shape":"StrictEntityValidation", - "documentation":"

    Whether to accept valid metric data when an invalid entity is sent.

    • When set to true: Any validation error (for entity or metric data) will fail the entire request, and no data will be ingested. The failed operation will return a 400 result with the error.

    • When set to false: Validation errors in the entity will not associate the metric with the entity, but the metric data will still be accepted and ingested. Validation errors in the metric data will fail the entire request, and no data will be ingested.

      In the case of an invalid entity, the operation will return a 200 status, but an additional response header will contain information about the validation errors. The new header, X-Amzn-Failure-Message is an enumeration of the following values:

      • InvalidEntity - The provided entity is invalid.

      • InvalidKeyAttributes - The provided KeyAttributes of an entity is invalid.

      • InvalidAttributes - The provided Attributes of an entity is invalid.

      • InvalidTypeValue - The provided Type in the KeyAttributes of an entity is invalid.

      • EntitySizeTooLarge - The number of EntityMetricData objects allowed is 2.

      • MissingRequiredFields - There are missing required fields in the KeyAttributes for the provided Type.

      For details of the requirements for specifying an entity, see How to add related information to telemetry in the CloudWatch User Guide.

    This parameter is required when EntityMetricData is included.

    ", + "documentation":"

    Whether to accept valid metric data when an invalid entity is sent.

    • When set to true: Any validation error (for entity or metric data) will fail the entire request, and no data will be ingested. The failed operation will return a 400 result with the error.

    • When set to false: Validation errors in the entity will not associate the metric with the entity, but the metric data will still be accepted and ingested. Validation errors in the metric data will fail the entire request, and no data will be ingested.

      In the case of an invalid entity, the operation will return a 200 status, but an additional response header will contain information about the validation errors. The new header, X-Amzn-Failure-Message is an enumeration of the following values:

      • InvalidEntity - The provided entity is invalid.

      • InvalidKeyAttributes - The provided KeyAttributes of an entity is invalid.

      • InvalidAttributes - The provided Attributes of an entity is invalid.

      • InvalidTypeValue - The provided Type in the KeyAttributes of an entity is invalid.

      • EntitySizeTooLarge - The number of EntityMetricData objects allowed is 2.

      • MissingRequiredFields - There are missing required fields in the KeyAttributes for the provided Type.

      For details of the requirements for specifying an entity, see How to add related information to telemetry in the CloudWatch User Guide.

    This parameter is required when EntityMetricData is included.

    ", "box":true } } @@ -3640,8 +3654,7 @@ }, "StartMetricStreamsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "Stat":{"type":"string"}, "StateReason":{ @@ -3727,8 +3740,7 @@ }, "StopMetricStreamsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "StorageResolution":{ "type":"integer", @@ -3786,8 +3798,7 @@ }, "TagResourceOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -3830,8 +3841,7 @@ }, "UntagResourceOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "Values":{ "type":"list", diff --git a/services/cloudwatchevents/pom.xml b/services/cloudwatchevents/pom.xml index e41c4f8c6bfd..99c84a8b2e46 100644 --- a/services/cloudwatchevents/pom.xml +++ b/services/cloudwatchevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudwatchevents AWS Java SDK :: Services :: Amazon CloudWatch Events diff --git a/services/cloudwatchevents/src/main/resources/codegen-resources/customization.config b/services/cloudwatchevents/src/main/resources/codegen-resources/customization.config index 8d5bbd7d43b3..fbfefb027d8e 100644 --- a/services/cloudwatchevents/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatchevents/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "describeEventBus", "listRules" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudwatchlogs/pom.xml b/services/cloudwatchlogs/pom.xml index a303b9b5e4e5..19e06386b192 100644 --- a/services/cloudwatchlogs/pom.xml +++ b/services/cloudwatchlogs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cloudwatchlogs AWS Java SDK :: Services :: Amazon CloudWatch Logs diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config 
b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config index 23a3fe8bbf4c..beec95efc1d2 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/customization.config @@ -14,6 +14,5 @@ "paginationCustomization": { "GetLogEvents": "LastPageHasPreviousToken" }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json index 504e861a8c99..abb7f50aa1e8 100644 --- a/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json +++ b/services/cloudwatchlogs/src/main/resources/codegen-resources/service-2.json @@ -562,7 +562,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

    CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

    If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

    " + "documentation":"

    Returns information about log groups. You can return all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

    CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

    If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

    " }, "DescribeLogStreams":{ "name":"DescribeLogStreams", @@ -680,7 +680,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

    Lists log events from the specified log group. You can list all the log events or filter the results using one or more of the following:

    • A filter pattern

    • A time range

    • The log stream name, or a log stream name prefix that matches multiple log streams

    You must have the logs:FilterLogEvents permission to perform this operation.

    You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

    FilterLogEvents is a paginated operation. Each page returned can contain up to 1 MB of log events or up to 10,000 log events. A returned page might only be partially full, or even empty. For example, if the result of a query would return 15,000 log events, the first page isn't guaranteed to have 10,000 log events even if they all fit into 1 MB.

    Partially full or empty pages don't necessarily mean that pagination is finished. If the results include a nextToken, there might be more log events available. You can return these additional log events by providing the nextToken in a subsequent FilterLogEvents operation. If the results don't include a nextToken, then pagination is finished.

    If you set startFromHead to true and you don’t include endTime in your request, you can end up in a situation where the pagination doesn't terminate. This can happen when the new log events are being added to the target log streams faster than they are being read. This situation is a good use case for the CloudWatch Logs Live Tail feature.

    The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request.

    If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

    If you are using log transformation, the FilterLogEvents operation returns only the original versions of log events, before they were transformed. To view the transformed versions, you must use a CloudWatch Logs query.

    " + "documentation":"

    Lists log events from the specified log group. You can list all the log events or filter the results using one or more of the following:

    • A filter pattern

    • A time range

    • The log stream name, or a log stream name prefix that matches multiple log streams

    You must have the logs:FilterLogEvents permission to perform this operation.

    You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

    FilterLogEvents is a paginated operation. Each page returned can contain up to 1 MB of log events or up to 10,000 log events. A returned page might only be partially full, or even empty. For example, if the result of a query would return 15,000 log events, the first page isn't guaranteed to have 10,000 log events even if they all fit into 1 MB.

    Partially full or empty pages don't necessarily mean that pagination is finished. If the results include a nextToken, there might be more log events available. You can return these additional log events by providing the nextToken in a subsequent FilterLogEvents operation. If the results don't include a nextToken, then pagination is finished.

    Specifying the limit parameter only guarantees that a single page doesn't return more log events than the specified limit, but it might return fewer events than the limit. This is the expected API behavior.

    The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request.

    If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

    If you are using log transformation, the FilterLogEvents operation returns only the original versions of log events, before they were transformed. To view the transformed versions, you must use a CloudWatch Logs query.

    " }, "GetDataProtectionPolicy":{ "name":"GetDataProtectionPolicy", @@ -919,6 +919,20 @@ ], "documentation":"

    Retrieves a list of the log anomaly detectors in the account.

    " }, + "ListLogGroups":{ + "name":"ListLogGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListLogGroupsRequest"}, + "output":{"shape":"ListLogGroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

    Returns a list of log groups in the Region in your account. If you are performing this action in a monitoring account, you can choose to also return log groups from source accounts that are linked to the monitoring account. For more information about using cross-account observability to set up monitoring accounts and source accounts, see CloudWatch cross-account observability.

    You can optionally filter the list by log group class and by using regular expressions in your request to match strings in the log group names.

    This operation is paginated. By default, your first use of this operation returns 50 results, and includes a token to use in a subsequent operation to return more results.

    " + }, "ListLogGroupsForQuery":{ "name":"ListLogGroupsForQuery", "http":{ @@ -1129,7 +1143,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnrecognizedClientException"} ], - "documentation":"

    Uploads a batch of log events to the specified log stream.

    The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream.

    The batch of events must satisfy the following constraints:

    • The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

    • None of the log events in the batch can be more than 2 hours in the future.

    • None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.

    • The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

    • A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.

    • Each log event can be no larger than 1 MB.

    • The maximum number of log events in a batch is 10,000.

    • The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.

    If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.

    " + "documentation":"

    Uploads a batch of log events to the specified log stream.

    The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream.

    The batch of events must satisfy the following constraints:

    • The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.

    • Events more than 2 hours in the future are rejected while processing remaining valid events.

    • Events older than 14 days or preceding the log group's retention period are rejected while processing remaining valid events.

    • The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.)

    • A batch of log events in a single request must be in a chronological order. Otherwise, the operation fails.

    • Each log event can be no larger than 1 MB.

    • The maximum number of log events in a batch is 10,000.

    • For valid events (within 14 days in the past to 2 hours in future), the time span in a single batch cannot exceed 24 hours. Otherwise, the operation fails.

    The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.

    If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.

    " }, "PutMetricFilter":{ "name":"PutMetricFilter", @@ -2383,7 +2397,7 @@ }, "Delimiter":{ "type":"string", - "max":1, + "max":2, "min":1 }, "Deliveries":{ @@ -2802,12 +2816,18 @@ "max":50, "min":1 }, + "DescribeLogGroupsLogGroupIdentifiers":{ + "type":"list", + "member":{"shape":"LogGroupIdentifier"}, + "max":50, + "min":1 + }, "DescribeLogGroupsRequest":{ "type":"structure", "members":{ "accountIdentifiers":{ "shape":"AccountIds", - "documentation":"

    When includeLinkedAccounts is set to True, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.

    " + "documentation":"

    When includeLinkedAccounts is set to true, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.

    " }, "logGroupNamePrefix":{ "shape":"LogGroupName", @@ -2827,11 +2847,15 @@ }, "includeLinkedAccounts":{ "shape":"IncludeLinkedAccounts", - "documentation":"

    If you are using a monitoring account, set this to True to have the operation return log groups in the accounts listed in accountIdentifiers.

    If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

    " + "documentation":"

    If you are using a monitoring account, set this to true to have the operation return log groups in the accounts listed in accountIdentifiers.

    If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

    The default for this parameter is false.

    " }, "logGroupClass":{ "shape":"LogGroupClass", - "documentation":"

    Specifies the log group class for this log group. There are three classes:

    • The Standard log class supports all CloudWatch Logs features.

    • The Infrequent Access log class supports a subset of CloudWatch Logs features and incurs lower costs.

    • Use the Delivery log class only for delivering Lambda logs to store in Amazon S3 or Amazon Data Firehose. Log events in log groups in the Delivery class are kept in CloudWatch Logs for only one day. This log class doesn't offer rich CloudWatch Logs capabilities such as CloudWatch Logs Insights queries.

    For details about the features supported by each class, see Log classes

    " + "documentation":"

    Use this parameter to limit the results to only those log groups in the specified log group class. If you omit this parameter, log groups of all classes can be returned.

    Specifies the log group class for this log group. There are three classes:

    • The Standard log class supports all CloudWatch Logs features.

    • The Infrequent Access log class supports a subset of CloudWatch Logs features and incurs lower costs.

    • Use the Delivery log class only for delivering Lambda logs to store in Amazon S3 or Amazon Data Firehose. Log events in log groups in the Delivery class are kept in CloudWatch Logs for only one day. This log class doesn't offer rich CloudWatch Logs capabilities such as CloudWatch Logs Insights queries.

    For details about the features supported by each class, see Log classes

    " + }, + "logGroupIdentifiers":{ + "shape":"DescribeLogGroupsLogGroupIdentifiers", + "documentation":"

    Use this array to filter the list of log groups returned. If you specify this parameter, the only other filter that you can choose to specify is includeLinkedAccounts.

    If you are using this operation in a monitoring account, you can specify the ARNs of log groups in source accounts and in the monitoring account itself. If you are using this operation in an account that is not a cross-account monitoring account, you can specify only log group names in the same account as the operation.

    " } } }, @@ -2840,7 +2864,7 @@ "members":{ "logGroups":{ "shape":"LogGroups", - "documentation":"

    The log groups.

    If the retentionInDays value is not included for a log group, then that log group's events do not expire.

    " + "documentation":"

    An array of structures, where each structure contains the information about one log group.

    " }, "nextToken":{"shape":"NextToken"} } @@ -3231,6 +3255,16 @@ "min":1 }, "EventNumber":{"type":"long"}, + "EventSource":{ + "type":"string", + "enum":[ + "CloudTrail", + "Route53Resolver", + "VPCFlow", + "EKSAudit", + "AWSWAF" + ] + }, "EventsLimit":{ "type":"integer", "max":10000, @@ -3882,14 +3916,14 @@ }, "match":{ "shape":"GrokMatch", - "documentation":"

    The grok pattern to match against the log event. For a list of supported grok patterns, see Supported grok patterns.

    " + "documentation":"

    The grok pattern to match against the log event. For a list of supported grok patterns, see Supported grok patterns.

    " } }, "documentation":"

    This processor uses pattern matching to parse and structure unstructured data. This processor can also extract fields from log messages.

    For more information about this processor including examples, see grok in the CloudWatch Logs User Guide.

    " }, "GrokMatch":{ "type":"string", - "max":128, + "max":512, "min":1 }, "Histogram":{ @@ -4145,6 +4179,11 @@ } } }, + "ListLimit":{ + "type":"integer", + "max":1000, + "min":1 + }, "ListLogAnomalyDetectorsLimit":{ "type":"integer", "max":50, @@ -4204,6 +4243,42 @@ "nextToken":{"shape":"NextToken"} } }, + "ListLogGroupsRequest":{ + "type":"structure", + "members":{ + "logGroupNamePattern":{ + "shape":"LogGroupNameRegexPattern", + "documentation":"

    Use this parameter to limit the returned log groups to only those with names that match the pattern that you specify. This parameter is a regular expression that can match prefixes and substrings, and supports wildcard matching and matching multiple patterns, as in the following examples.

    • Use ^ to match log group names by prefix.

    • For a substring match, specify the string to match. All matches are case sensitive

    • To match multiple patterns, separate them with a | as in the example ^/aws/lambda|discovery

    You can specify as many as five different regular expression patterns in this field, each of which must be between 3 and 24 characters. You can include the ^ symbol as many as five times, and include the | symbol as many as four times.

    " + }, + "logGroupClass":{ + "shape":"LogGroupClass", + "documentation":"

    Use this parameter to limit the results to only those log groups in the specified log group class. If you omit this parameter, log groups of all classes can be returned.

    " + }, + "includeLinkedAccounts":{ + "shape":"IncludeLinkedAccounts", + "documentation":"

    If you are using a monitoring account, set this to true to have the operation return log groups in the accounts listed in accountIdentifiers.

    If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

    The default for this parameter is false.

    " + }, + "accountIdentifiers":{ + "shape":"AccountIds", + "documentation":"

    When includeLinkedAccounts is set to true, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.

    " + }, + "nextToken":{"shape":"NextToken"}, + "limit":{ + "shape":"ListLimit", + "documentation":"

    The maximum number of log groups to return. If you omit this parameter, the default is up to 50 log groups.

    " + } + } + }, + "ListLogGroupsResponse":{ + "type":"structure", + "members":{ + "logGroups":{ + "shape":"LogGroupSummaries", + "documentation":"

    An array of structures, where each structure contains the information about one log group.

    " + }, + "nextToken":{"shape":"NextToken"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -4278,7 +4353,7 @@ "documentation":"

    If you set flatten to true, use flattenedElement to specify which element, first or last, to keep.

    You must specify this parameter if flatten is true

    " } }, - "documentation":"

    This processor takes a list of objects that contain key fields, and converts them into a map of target keys.

    For more information about this processor including examples, see listToMap in the CloudWatch Logs User Guide.

    " + "documentation":"

    This processor takes a list of objects that contain key fields, and converts them into a map of target keys.

    For more information about this processor including examples, see listToMap in the CloudWatch Logs User Guide.

    " }, "LiveTailSessionLogEvent":{ "type":"structure", @@ -4490,10 +4565,38 @@ "min":0, "pattern":"[\\.\\-_/#A-Za-z0-9]*" }, + "LogGroupNameRegexPattern":{ + "type":"string", + "max":129, + "min":3, + "pattern":"(\\^?[\\.\\-_\\/#A-Za-z0-9]{3,24})(\\|\\^?[\\.\\-_\\/#A-Za-z0-9]{3,24}){0,4}" + }, "LogGroupNames":{ "type":"list", "member":{"shape":"LogGroupName"} }, + "LogGroupSummaries":{ + "type":"list", + "member":{"shape":"LogGroupSummary"} + }, + "LogGroupSummary":{ + "type":"structure", + "members":{ + "logGroupName":{ + "shape":"LogGroupName", + "documentation":"

    The name of the log group.

    " + }, + "logGroupArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the log group.

    " + }, + "logGroupClass":{ + "shape":"LogGroupClass", + "documentation":"

    The log group class for this log group. For details about the features supported by each log group class, see Log classes

    " + } + }, + "documentation":"

    This structure contains information about one log group in your account.

    " + }, "LogGroups":{ "type":"list", "member":{"shape":"LogGroup"} @@ -4766,6 +4869,10 @@ "max":128, "min":1 }, + "OCSFVersion":{ + "type":"string", + "enum":["V1.1"] + }, "OpenSearchApplication":{ "type":"structure", "members":{ @@ -5145,6 +5252,28 @@ }, "documentation":"

    Use this processor to parse Route 53 vended logs, extract fields, and convert them into a JSON format. This processor always processes the entire log event message. For more information about this processor including examples, see parseRoute53.

    If you use this processor, it must be the first processor in your transformer.

    " }, + "ParseToOCSF":{ + "type":"structure", + "required":[ + "eventSource", + "ocsfVersion" + ], + "members":{ + "source":{ + "shape":"Source", + "documentation":"

    The path to the field in the log event that you want to parse. If you omit this value, the whole log message is parsed.

    " + }, + "eventSource":{ + "shape":"EventSource", + "documentation":"

    Specify the service or process that produces the log events that will be converted with this processor.

    " + }, + "ocsfVersion":{ + "shape":"OCSFVersion", + "documentation":"

    Specify which version of the OCSF schema to use for the transformed log events.

    " + } + }, + "documentation":"

    This processor converts logs into Open Cybersecurity Schema Framework (OCSF) events.

    For more information about this processor including examples, see parseToOCSF in the CloudWatch Logs User Guide.

    " + }, "ParseVPC":{ "type":"structure", "members":{ @@ -5302,6 +5431,10 @@ "shape":"ParseRoute53", "documentation":"

    Use this parameter to include the parseRoute53 processor in your transformer.

    If you use this processor, it must be the first processor in your transformer.

    " }, + "parseToOCSF":{ + "shape":"ParseToOCSF", + "documentation":"

    Use this processor to convert logs into Open Cybersecurity Schema Framework (OCSF) format

    " + }, "parsePostgres":{ "shape":"ParsePostgres", "documentation":"

    Use this parameter to include the parsePostGres processor in your transformer.

    If you use this processor, it must be the first processor in your transformer.

    " @@ -5498,7 +5631,7 @@ }, "logType":{ "shape":"LogType", - "documentation":"

    Defines the type of log that the source is sending.

    • For Amazon Bedrock, the valid value is APPLICATION_LOGS.

    • For CloudFront, the valid value is ACCESS_LOGS.

    • For Amazon CodeWhisperer, the valid value is EVENT_LOGS.

    • For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS and INGRESS_ACCESS_LOGS.

    • For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS, MANIFEST_SERVICE_LOGS, and TRANSCODE_LOGS.

    • For IAM Identity Center, the valid value is ERROR_LOGS.

    • For Amazon Q, the valid value is EVENT_LOGS.

    • For Amazon SES mail manager, the valid value is APPLICATION_LOG.

    • For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, WORKMAIL_MAILBOX_ACCESS_LOGS, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS.

    " + "documentation":"

    Defines the type of log that the source is sending.

    • For Amazon Bedrock, the valid value is APPLICATION_LOGS.

    • For CloudFront, the valid value is ACCESS_LOGS.

    • For Amazon CodeWhisperer, the valid value is EVENT_LOGS.

    • For Elemental MediaPackage, the valid values are EGRESS_ACCESS_LOGS and INGRESS_ACCESS_LOGS.

    • For Elemental MediaTailor, the valid values are AD_DECISION_SERVER_LOGS, MANIFEST_SERVICE_LOGS, and TRANSCODE_LOGS.

    • For Entity Resolution, the valid value is WORKFLOW_LOGS.

    • For IAM Identity Center, the valid value is ERROR_LOGS.

    • For Amazon Q, the valid value is EVENT_LOGS.

    • For Amazon SES mail manager, the valid values are APPLICATION_LOG and TRAFFIC_POLICY_DEBUG_LOGS.

    • For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, WORKMAIL_MAILBOX_ACCESS_LOGS, and WORKMAIL_PERSONAL_ACCESS_TOKEN_LOGS.

    " }, "tags":{ "shape":"Tags", @@ -5921,7 +6054,7 @@ "QueryId":{ "type":"string", "max":256, - "min":0 + "min":1 }, "QueryInfo":{ "type":"structure", @@ -6315,6 +6448,11 @@ }, "documentation":"

    Use this processor to split a field into an array of strings using a delimiting character.

    For more information about this processor including examples, see splitString in the CloudWatch Logs User Guide.

    " }, + "SplitStringDelimiter":{ + "type":"string", + "max":128, + "min":1 + }, "SplitStringEntries":{ "type":"list", "member":{"shape":"SplitStringEntry"}, @@ -6333,7 +6471,7 @@ "documentation":"

    The key of the field to split.

    " }, "delimiter":{ - "shape":"Delimiter", + "shape":"SplitStringDelimiter", "documentation":"

    The separator characters to split the string entry on.

    " } }, diff --git a/services/codeartifact/pom.xml b/services/codeartifact/pom.xml index b70982c2a178..f322f359e5c8 100644 --- a/services/codeartifact/pom.xml +++ b/services/codeartifact/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codeartifact AWS Java SDK :: Services :: Codeartifact diff --git a/services/codeartifact/src/main/resources/codegen-resources/customization.config b/services/codeartifact/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/codeartifact/src/main/resources/codegen-resources/customization.config +++ b/services/codeartifact/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codebuild/pom.xml b/services/codebuild/pom.xml index 324c23e3a969..fc63ff4e47c2 100644 --- a/services/codebuild/pom.xml +++ b/services/codebuild/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codebuild AWS Java SDK :: Services :: AWS Code Build diff --git a/services/codebuild/src/main/resources/codegen-resources/customization.config b/services/codebuild/src/main/resources/codegen-resources/customization.config index 01f4d6aa2101..14d6f6ce48c2 100644 --- a/services/codebuild/src/main/resources/codegen-resources/customization.config +++ b/services/codebuild/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "listProjects", "listSourceCredentials" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codebuild/src/main/resources/codegen-resources/service-2.json b/services/codebuild/src/main/resources/codegen-resources/service-2.json index 4c877676c505..ea0c7790f233 100644 --- 
a/services/codebuild/src/main/resources/codegen-resources/service-2.json +++ b/services/codebuild/src/main/resources/codegen-resources/service-2.json @@ -1909,11 +1909,11 @@ }, "computeType":{ "shape":"ComputeType", - "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " + "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • CUSTOM_INSTANCE_TYPE: Specify the instance type for your compute fleet. For a list of supported instance types, see Supported instance families in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " }, "computeConfiguration":{ "shape":"ComputeConfiguration", - "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE.

    " + "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE or CUSTOM_INSTANCE_TYPE.

    " }, "scalingConfiguration":{ "shape":"ScalingConfigurationInput", @@ -2368,6 +2368,39 @@ } } }, + "DockerServer":{ + "type":"structure", + "required":["computeType"], + "members":{ + "computeType":{ + "shape":"ComputeType", + "documentation":"

    Information about the compute resources the docker server uses. Available values include:

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for your docker server.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for your docker server.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for your docker server.

    • BUILD_GENERAL1_XLARGE: Use up to 64 GiB memory and 32 vCPUs for your docker server.

    • BUILD_GENERAL1_2XLARGE: Use up to 128 GiB memory and 64 vCPUs for your docker server.

    " + }, + "securityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

    A list of one or more security group IDs.

    Security groups configured for Docker servers should allow ingress network traffic from the VPC configured in the project. They should allow ingress on port 9876.

    " + }, + "status":{ + "shape":"DockerServerStatus", + "documentation":"

    A DockerServerStatus object to use for this docker server.

    " + } + }, + "documentation":"

    Contains docker server information.

    " + }, + "DockerServerStatus":{ + "type":"structure", + "members":{ + "status":{ + "shape":"String", + "documentation":"

    The status of the docker server.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    A message associated with the status of a docker server.

    " + } + }, + "documentation":"

    Contains information about the status of the docker server.

    " + }, "EnvironmentImage":{ "type":"structure", "members":{ @@ -2544,11 +2577,11 @@ }, "computeType":{ "shape":"ComputeType", - "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " + "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • CUSTOM_INSTANCE_TYPE: Specify the instance type for your compute fleet. For a list of supported instance types, see Supported instance families in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " }, "computeConfiguration":{ "shape":"ComputeConfiguration", - "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE.

    " + "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE or CUSTOM_INSTANCE_TYPE.

    " }, "scalingConfiguration":{ "shape":"ScalingConfigurationOutput", @@ -3808,6 +3841,10 @@ "imagePullCredentialsType":{ "shape":"ImagePullCredentialsType", "documentation":"

    The type of credentials CodeBuild uses to pull images in your build. There are two valid values:

    • CODEBUILD specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild service principal.

    • SERVICE_ROLE specifies that CodeBuild uses your build project's service role.

    When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an CodeBuild curated image, you must use CODEBUILD credentials.

    " + }, + "dockerServer":{ + "shape":"DockerServer", + "documentation":"

    A DockerServer object to use for this build project.

    " } }, "documentation":"

    Information about the build environment of the build project.

    " @@ -5311,11 +5348,11 @@ }, "computeType":{ "shape":"ComputeType", - "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " + "documentation":"

    Information about the compute resources the compute fleet uses. Available values include:

    • ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine.

      If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide.

    • CUSTOM_INSTANCE_TYPE: Specify the instance type for your compute fleet. For a list of supported instance types, see Supported instance families in the CodeBuild User Guide.

    • BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds.

    • BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds.

    • BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type.

    • BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.

    • BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    • BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER.

    If you use BUILD_GENERAL1_SMALL:

    • For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.

    • For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds.

    If you use BUILD_GENERAL1_LARGE:

    • For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds.

    • For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.

    • For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds.

    For more information, see On-demand environment types in the CodeBuild User Guide.

    " }, "computeConfiguration":{ "shape":"ComputeConfiguration", - "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE.

    " + "documentation":"

    The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE or CUSTOM_INSTANCE_TYPE.

    " }, "scalingConfiguration":{ "shape":"ScalingConfigurationInput", diff --git a/services/codecatalyst/pom.xml b/services/codecatalyst/pom.xml index 3b14934da2ba..35d670cfff8d 100644 --- a/services/codecatalyst/pom.xml +++ b/services/codecatalyst/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codecatalyst AWS Java SDK :: Services :: Code Catalyst diff --git a/services/codecatalyst/src/main/resources/codegen-resources/customization.config b/services/codecatalyst/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/codecatalyst/src/main/resources/codegen-resources/customization.config +++ b/services/codecatalyst/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codecommit/pom.xml b/services/codecommit/pom.xml index f0e4691a04e8..92ab50473a62 100644 --- a/services/codecommit/pom.xml +++ b/services/codecommit/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codecommit AWS Java SDK :: Services :: AWS CodeCommit diff --git a/services/codecommit/src/main/resources/codegen-resources/customization.config b/services/codecommit/src/main/resources/codegen-resources/customization.config index 9e1e53b1a4f0..05c43b1144ac 100644 --- a/services/codecommit/src/main/resources/codegen-resources/customization.config +++ b/services/codecommit/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "getBranch" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codeconnections/pom.xml b/services/codeconnections/pom.xml index c66b3e4b6a70..7c4d9a4425dc 100644 --- a/services/codeconnections/pom.xml +++ 
b/services/codeconnections/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codeconnections AWS Java SDK :: Services :: Code Connections diff --git a/services/codeconnections/src/main/resources/codegen-resources/customization.config b/services/codeconnections/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/codeconnections/src/main/resources/codegen-resources/customization.config +++ b/services/codeconnections/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/codedeploy/pom.xml b/services/codedeploy/pom.xml index 0e5a091d30f3..4e1e89385ab6 100644 --- a/services/codedeploy/pom.xml +++ b/services/codedeploy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codedeploy AWS Java SDK :: Services :: AWS CodeDeploy diff --git a/services/codedeploy/src/main/resources/codegen-resources/customization.config b/services/codedeploy/src/main/resources/codegen-resources/customization.config index ad677bc71951..5fc15ca47f6f 100644 --- a/services/codedeploy/src/main/resources/codegen-resources/customization.config +++ b/services/codedeploy/src/main/resources/codegen-resources/customization.config @@ -31,6 +31,5 @@ "InstanceStatus", "InstanceSummary" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codeguruprofiler/pom.xml b/services/codeguruprofiler/pom.xml index b1077a62d65e..33b043e4d69c 100644 --- a/services/codeguruprofiler/pom.xml +++ b/services/codeguruprofiler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codeguruprofiler AWS Java SDK :: Services :: CodeGuruProfiler diff --git a/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config 
b/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config +++ b/services/codeguruprofiler/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codegurureviewer/pom.xml b/services/codegurureviewer/pom.xml index 03c85f1644cf..f568e18c2323 100644 --- a/services/codegurureviewer/pom.xml +++ b/services/codegurureviewer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codegurureviewer AWS Java SDK :: Services :: CodeGuru Reviewer diff --git a/services/codegurureviewer/src/main/resources/codegen-resources/customization.config b/services/codegurureviewer/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/codegurureviewer/src/main/resources/codegen-resources/customization.config +++ b/services/codegurureviewer/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codegurusecurity/pom.xml b/services/codegurusecurity/pom.xml index bb550b1ccadd..a0fd4989f499 100644 --- a/services/codegurusecurity/pom.xml +++ b/services/codegurusecurity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codegurusecurity AWS Java SDK :: Services :: Code Guru Security diff --git a/services/codegurusecurity/src/main/resources/codegen-resources/customization.config b/services/codegurusecurity/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/codegurusecurity/src/main/resources/codegen-resources/customization.config +++ b/services/codegurusecurity/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codepipeline/pom.xml b/services/codepipeline/pom.xml index 3f87651cda0d..bc7516f37998 100644 --- a/services/codepipeline/pom.xml +++ b/services/codepipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codepipeline AWS Java SDK :: Services :: AWS CodePipeline diff --git a/services/codepipeline/src/main/resources/codegen-resources/customization.config b/services/codepipeline/src/main/resources/codegen-resources/customization.config index 58a0ff6b53c7..2c0df19be840 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/customization.config +++ b/services/codepipeline/src/main/resources/codegen-resources/customization.config @@ -8,6 +8,5 @@ "deregisterWebhookWithThirdParty", "registerWebhookWithThirdParty" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codepipeline/src/main/resources/codegen-resources/paginators-1.json b/services/codepipeline/src/main/resources/codegen-resources/paginators-1.json index 8479855006c7..7c782efb2d21 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/paginators-1.json +++ b/services/codepipeline/src/main/resources/codegen-resources/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "result_key": "actionTypes" }, + "ListDeployActionExecutionTargets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "targets" + }, "ListPipelineExecutions": { "input_token": "nextToken", "output_token": "nextToken", diff --git 
a/services/codepipeline/src/main/resources/codegen-resources/service-2.json b/services/codepipeline/src/main/resources/codegen-resources/service-2.json index 239c60d3ef9a..ac7147e3525a 100644 --- a/services/codepipeline/src/main/resources/codegen-resources/service-2.json +++ b/services/codepipeline/src/main/resources/codegen-resources/service-2.json @@ -285,6 +285,22 @@ ], "documentation":"

    Gets a summary of all CodePipeline action types associated with your account.

    " }, + "ListDeployActionExecutionTargets":{ + "name":"ListDeployActionExecutionTargets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDeployActionExecutionTargetsInput"}, + "output":{"shape":"ListDeployActionExecutionTargetsOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"PipelineNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ActionExecutionNotFoundException"} + ], + "documentation":"

    Lists the targets for the deploy action.

    " + }, "ListPipelineExecutions":{ "name":"ListPipelineExecutions", "http":{ @@ -1108,6 +1124,14 @@ }, "documentation":"

    Input information used for an action execution.

    " }, + "ActionExecutionNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

    The action execution was not found.

    ", + "exception":true + }, "ActionExecutionOutput":{ "type":"structure", "members":{ @@ -2095,6 +2119,84 @@ "type":"structure", "members":{} }, + "DeployActionExecutionTarget":{ + "type":"structure", + "members":{ + "targetId":{ + "shape":"String", + "documentation":"

    The ID of the target for the deploy action.

    " + }, + "targetType":{ + "shape":"String", + "documentation":"

    The type of target for the deploy action.

    " + }, + "status":{ + "shape":"String", + "documentation":"

    The status of the deploy action.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time for the deploy action.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time for the deploy action.

    " + }, + "events":{ + "shape":"DeployTargetEventList", + "documentation":"

    The lifecycle events for the deploy action.

    " + } + }, + "documentation":"

    The target for the deploy action.

    " + }, + "DeployActionExecutionTargetList":{ + "type":"list", + "member":{"shape":"DeployActionExecutionTarget"} + }, + "DeployTargetEvent":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The name of the event for the deploy action.

    " + }, + "status":{ + "shape":"String", + "documentation":"

    The status of the event for the deploy action.

    " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

    The start time for the event for the deploy action.

    " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

    The end time for the event for the deploy action.

    " + }, + "context":{ + "shape":"DeployTargetEventContext", + "documentation":"

    The context for the event for the deploy action.

    " + } + }, + "documentation":"

    A lifecycle event for the deploy action.

    " + }, + "DeployTargetEventContext":{ + "type":"structure", + "members":{ + "ssmCommandId":{ + "shape":"String", + "documentation":"

    The command ID for the event for the deploy action.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    The context message for the event for the deploy action.

    " + } + }, + "documentation":"

    The context for the event for the deploy action.

    " + }, + "DeployTargetEventList":{ + "type":"list", + "member":{"shape":"DeployTargetEvent"} + }, "DeregisterWebhookWithThirdPartyInput":{ "type":"structure", "members":{ @@ -3089,6 +3191,45 @@ }, "documentation":"

    Represents the output of a ListActionTypes action.

    " }, + "ListDeployActionExecutionTargetsInput":{ + "type":"structure", + "required":["actionExecutionId"], + "members":{ + "pipelineName":{ + "shape":"PipelineName", + "documentation":"

    The name of the pipeline with the deploy action.

    " + }, + "actionExecutionId":{ + "shape":"ActionExecutionId", + "documentation":"

    The execution ID for the deploy action.

    " + }, + "filters":{ + "shape":"TargetFilterList", + "documentation":"

    Filters the targets for a specified deploy action.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An identifier that was returned from the previous list action types call, which can be used to return the next set of action types in the list.

    " + } + } + }, + "ListDeployActionExecutionTargetsOutput":{ + "type":"structure", + "members":{ + "targets":{ + "shape":"DeployActionExecutionTargetList", + "documentation":"

    The targets for the deploy action.

    " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

    An identifier that was returned from the previous list action types call, which can be used to return the next set of action types in the list.

    " + } + } + }, "ListPipelineExecutionsInput":{ "type":"structure", "required":["pipelineName"], @@ -5309,6 +5450,36 @@ "max":256, "min":0 }, + "TargetFilter":{ + "type":"structure", + "members":{ + "name":{ + "shape":"TargetFilterName", + "documentation":"

    The name on which to filter.

    " + }, + "values":{ + "shape":"TargetFilterValueList", + "documentation":"

    The values on which to filter.

    " + } + }, + "documentation":"

    Filters the list of targets.

    " + }, + "TargetFilterList":{ + "type":"list", + "member":{"shape":"TargetFilter"} + }, + "TargetFilterName":{ + "type":"string", + "enum":["TARGET_STATUS"] + }, + "TargetFilterValue":{ + "type":"string", + "min":1 + }, + "TargetFilterValueList":{ + "type":"list", + "member":{"shape":"TargetFilterValue"} + }, "ThirdPartyJob":{ "type":"structure", "members":{ diff --git a/services/codestarconnections/pom.xml b/services/codestarconnections/pom.xml index cef025b104e4..2d365e18090a 100644 --- a/services/codestarconnections/pom.xml +++ b/services/codestarconnections/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codestarconnections AWS Java SDK :: Services :: CodeStar connections diff --git a/services/codestarconnections/src/main/resources/codegen-resources/customization.config b/services/codestarconnections/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/codestarconnections/src/main/resources/codegen-resources/customization.config +++ b/services/codestarconnections/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/codestarnotifications/pom.xml b/services/codestarnotifications/pom.xml index 1a650442f663..3e825fa19f43 100644 --- a/services/codestarnotifications/pom.xml +++ b/services/codestarnotifications/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT codestarnotifications AWS Java SDK :: Services :: Codestar Notifications diff --git a/services/codestarnotifications/src/main/resources/codegen-resources/customization.config b/services/codestarnotifications/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/codestarnotifications/src/main/resources/codegen-resources/customization.config +++ b/services/codestarnotifications/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cognitoidentity/pom.xml b/services/cognitoidentity/pom.xml index 5feeae769410..10bd0fdf466e 100644 --- a/services/cognitoidentity/pom.xml +++ b/services/cognitoidentity/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cognitoidentity AWS Java SDK :: Services :: Amazon Cognito Identity diff --git a/services/cognitoidentity/src/main/resources/codegen-resources/customization.config b/services/cognitoidentity/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/cognitoidentity/src/main/resources/codegen-resources/customization.config +++ b/services/cognitoidentity/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/cognitoidentityprovider/pom.xml b/services/cognitoidentityprovider/pom.xml index 683213a24ed8..575579bf380a 100644 --- a/services/cognitoidentityprovider/pom.xml +++ b/services/cognitoidentityprovider/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cognitoidentityprovider AWS Java SDK :: Services :: Amazon Cognito Identity Provider Service diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config index 2f28cd534ac5..da9f806be319 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config +++ 
b/services/cognitoidentityprovider/src/main/resources/codegen-resources/customization.config @@ -14,6 +14,5 @@ } ] } - }, - "enableFastUnmarshaller": true + } } diff --git a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json index d81c7daff4c3..af86e6e5748c 100644 --- a/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json +++ b/services/cognitoidentityprovider/src/main/resources/codegen-resources/service-2.json @@ -1055,6 +1055,8 @@ {"shape":"ForbiddenException"}, {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, {"shape":"NotAuthorizedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1750,6 +1752,8 @@ {"shape":"ForbiddenException"}, {"shape":"InternalErrorException"}, {"shape":"InvalidParameterException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, {"shape":"NotAuthorizedException"} ], "documentation":"

    Generates a list of the currently signed-in user's registered passkey, or WebAuthn, credentials.

    Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

    Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

    ", @@ -2457,8 +2461,7 @@ }, "AddCustomAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server for the request to add custom attributes.

    " }, "AdminAddUserToGroupRequest":{ @@ -2507,8 +2510,7 @@ }, "AdminConfirmSignUpResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server for the request to confirm registration.

    " }, "AdminCreateUserConfigType":{ @@ -2615,8 +2617,7 @@ }, "AdminDeleteUserAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response received from the server for a request to delete user attributes.

    " }, "AdminDeleteUserRequest":{ @@ -2656,8 +2657,7 @@ }, "AdminDisableProviderForUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AdminDisableUserRequest":{ "type":"structure", @@ -2679,8 +2679,7 @@ }, "AdminDisableUserResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response received from the server to disable the user as an administrator.

    " }, "AdminEnableUserRequest":{ @@ -2703,8 +2702,7 @@ }, "AdminEnableUserResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server for the request to enable a user as an administrator.

    " }, "AdminForgetDeviceRequest":{ @@ -2918,8 +2916,7 @@ }, "AdminLinkProviderForUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AdminListDevicesRequest":{ "type":"structure", @@ -3083,8 +3080,7 @@ }, "AdminResetUserPasswordResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server to reset a user password as an administrator.

    " }, "AdminRespondToAuthChallengeRequest":{ @@ -3183,8 +3179,7 @@ }, "AdminSetUserMFAPreferenceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AdminSetUserPasswordRequest":{ "type":"structure", @@ -3214,8 +3209,7 @@ }, "AdminSetUserPasswordResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AdminSetUserSettingsRequest":{ "type":"structure", @@ -3242,8 +3236,7 @@ }, "AdminSetUserSettingsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server to set user settings as an administrator.

    " }, "AdminUpdateAuthEventFeedbackRequest":{ @@ -3275,8 +3268,7 @@ }, "AdminUpdateAuthEventFeedbackResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AdminUpdateDeviceStatusRequest":{ "type":"structure", @@ -3307,8 +3299,7 @@ }, "AdminUpdateDeviceStatusResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The status response to the request to update the device, as an administrator.

    " }, "AdminUpdateUserAttributesRequest":{ @@ -3340,8 +3331,7 @@ }, "AdminUpdateUserAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server for the request to update user attributes as an administrator.

    " }, "AdminUserGlobalSignOutRequest":{ @@ -3364,8 +3354,7 @@ }, "AdminUserGlobalSignOutResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The global sign-out response, as an administrator.

    " }, "AdvancedSecurityAdditionalFlowsType":{ @@ -3833,8 +3822,7 @@ }, "ChangePasswordResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response from the server to the change password request.

    " }, "ClientIdType":{ @@ -3952,8 +3940,7 @@ }, "CompleteWebAuthnRegistrationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CompletionMessageType":{ "type":"string", @@ -4093,8 +4080,7 @@ }, "ConfirmForgotPasswordResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response from the server that results from a user's request to retrieve a forgotten password.

    " }, "ConfirmSignUpRequest":{ @@ -4808,8 +4794,7 @@ }, "DeleteUserAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server to delete user attributes.

    " }, "DeleteUserPoolClientRequest":{ @@ -4849,8 +4834,7 @@ }, "DeleteUserPoolDomainResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserPoolRequest":{ "type":"structure", @@ -4893,8 +4877,7 @@ }, "DeleteWebAuthnCredentialResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletionProtectionType":{ "type":"string", @@ -5238,8 +5221,7 @@ }, "Document":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "DomainDescriptionType":{ @@ -6036,8 +6018,7 @@ }, "GlobalSignOutResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response to the request to sign out all devices.

    " }, "GroupExistsException":{ @@ -7581,8 +7562,7 @@ }, "RevokeTokenResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RiskConfigurationType":{ "type":"structure", @@ -7894,8 +7874,7 @@ }, "SetUserMFAPreferenceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "SetUserPoolMfaConfigRequest":{ "type":"structure", @@ -7972,8 +7951,7 @@ }, "SetUserSettingsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response from the server for a set user settings request.

    " }, "SignInPolicyType":{ @@ -8277,8 +8255,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValueType":{ "type":"string", @@ -8459,8 +8436,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAuthEventFeedbackRequest":{ "type":"structure", @@ -8496,8 +8472,7 @@ }, "UpdateAuthEventFeedbackResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDeviceStatusRequest":{ "type":"structure", @@ -8523,8 +8498,7 @@ }, "UpdateDeviceStatusResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response to the request to update the device status.

    " }, "UpdateGroupRequest":{ @@ -8951,8 +8925,7 @@ }, "UpdateUserPoolResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Represents the response from the server when you make a request to update the user pool.

    " }, "UserAttributeUpdateSettingsType":{ @@ -9722,8 +9695,7 @@ }, "VerifyUserAttributeResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A container representing the response from the server from the request to verify user attributes.

    " }, "WebAuthnAuthenticatorAttachmentType":{"type":"string"}, diff --git a/services/cognitosync/pom.xml b/services/cognitosync/pom.xml index 8987a41d1c67..f7ec8ba50cd5 100644 --- a/services/cognitosync/pom.xml +++ b/services/cognitosync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT cognitosync AWS Java SDK :: Services :: Amazon Cognito Sync diff --git a/services/cognitosync/src/main/resources/codegen-resources/customization.config b/services/cognitosync/src/main/resources/codegen-resources/customization.config index 2bb7c53bc5ac..ac7b0e0410a2 100644 --- a/services/cognitosync/src/main/resources/codegen-resources/customization.config +++ b/services/cognitosync/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listIdentityPoolUsage" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/comprehend/pom.xml b/services/comprehend/pom.xml index 0f9e30d765f3..9bbad68f9dc2 100644 --- a/services/comprehend/pom.xml +++ b/services/comprehend/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 comprehend diff --git a/services/comprehend/src/main/resources/codegen-resources/customization.config b/services/comprehend/src/main/resources/codegen-resources/customization.config index 6fa8fc617c44..eba942663a0a 100644 --- a/services/comprehend/src/main/resources/codegen-resources/customization.config +++ b/services/comprehend/src/main/resources/codegen-resources/customization.config @@ -9,6 +9,5 @@ "listSentimentDetectionJobs", "listTopicsDetectionJobs" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/comprehendmedical/pom.xml b/services/comprehendmedical/pom.xml index b6047338213f..28330ee75e5a 100644 --- 
a/services/comprehendmedical/pom.xml +++ b/services/comprehendmedical/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT comprehendmedical AWS Java SDK :: Services :: ComprehendMedical diff --git a/services/comprehendmedical/src/main/resources/codegen-resources/customization.config b/services/comprehendmedical/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/comprehendmedical/src/main/resources/codegen-resources/customization.config +++ b/services/comprehendmedical/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/computeoptimizer/pom.xml b/services/computeoptimizer/pom.xml index 7a8f390f61c0..21b3b50b3f2a 100644 --- a/services/computeoptimizer/pom.xml +++ b/services/computeoptimizer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT computeoptimizer AWS Java SDK :: Services :: Compute Optimizer diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/customization.config b/services/computeoptimizer/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/customization.config +++ b/services/computeoptimizer/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json index 39ec013fc277..debe50c3138a 100644 --- a/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json +++ 
b/services/computeoptimizer/src/main/resources/codegen-resources/service-2.json @@ -213,7 +213,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Export optimization recommendations for your Amazon Relational Database Service (Amazon RDS).

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Amazon RDS export job in progress per Amazon Web Services Region.

    " + "documentation":"

    Export optimization recommendations for your Amazon Aurora and Amazon Relational Database Service (Amazon RDS) databases.

    Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide.

    You can have only one Amazon Aurora or RDS export job in progress per Amazon Web Services Region.

    " }, "GetAutoScalingGroupRecommendations":{ "name":"GetAutoScalingGroupRecommendations", @@ -469,7 +469,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns the projected metrics of Amazon RDS recommendations.

    " + "documentation":"

    Returns the projected metrics of Aurora and RDS database recommendations.

    " }, "GetRDSDatabaseRecommendations":{ "name":"GetRDSDatabaseRecommendations", @@ -489,7 +489,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns Amazon RDS recommendations.

    Compute Optimizer generates recommendations for Amazon RDS that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " + "documentation":"

    Returns Amazon Aurora and RDS database recommendations.

    Compute Optimizer generates recommendations for Amazon Aurora and RDS databases that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide.

    " }, "GetRecommendationPreferences":{ "name":"GetRecommendationPreferences", @@ -528,7 +528,7 @@ {"shape":"MissingAuthenticationToken"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns the optimization findings for an account.

    It returns the number of:

    • Amazon EC2 instances in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • Auto Scaling groups in an account that are NotOptimized, or Optimized.

    • Amazon EBS volumes in an account that are NotOptimized, or Optimized.

    • Lambda functions in an account that are NotOptimized, or Optimized.

    • Amazon ECS services in an account that are Underprovisioned, Overprovisioned, or Optimized.

    " + "documentation":"

    Returns the optimization findings for an account.

    It returns the number of:

    • Amazon EC2 instances in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • EC2Auto Scaling groups in an account that are NotOptimized, or Optimized.

    • Amazon EBS volumes in an account that are NotOptimized, or Optimized.

    • Lambda functions in an account that are NotOptimized, or Optimized.

    • Amazon ECS services in an account that are Underprovisioned, Overprovisioned, or Optimized.

    • Commercial software licenses in an account that are InsufficientMetrics, NotOptimized or Optimized.

    • Amazon Aurora and Amazon RDS databases in an account that are Underprovisioned, Overprovisioned, Optimized, or NotOptimized.

    " }, "PutRecommendationPreferences":{ "name":"PutRecommendationPreferences", @@ -952,23 +952,23 @@ "members":{ "storageType":{ "shape":"StorageType", - "documentation":"

    The type of RDS storage.

    " + "documentation":"

    The type of DB storage.

    " }, "allocatedStorage":{ "shape":"AllocatedStorage", - "documentation":"

    The size of the RDS storage in gigabytes (GB).

    " + "documentation":"

    The size of the DB storage in gigabytes (GB).

    " }, "iops":{ "shape":"NullableIOPS", - "documentation":"

    The provisioned IOPs of the RDS storage.

    " + "documentation":"

    The provisioned IOPs of the DB storage.

    " }, "maxAllocatedStorage":{ "shape":"NullableMaxAllocatedStorage", - "documentation":"

    The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the RDS instance.

    " + "documentation":"

    The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.

    " }, "storageThroughput":{ "shape":"NullableStorageThroughput", - "documentation":"

    The storage throughput of the RDS storage.

    " + "documentation":"

    The storage throughput of the DB storage.

    " } }, "documentation":"

    The configuration of the recommended RDS storage.

    " @@ -996,8 +996,7 @@ }, "DeleteRecommendationPreferencesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRecommendationExportJobsRequest":{ "type":"structure", @@ -1877,11 +1876,11 @@ "members":{ "accountIds":{ "shape":"AccountIds", - "documentation":"

    The Amazon Web Services account IDs for the export Amazon RDS recommendations.

    If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " + "documentation":"

    The Amazon Web Services account IDs for the export Amazon Aurora and RDS database recommendations.

    If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to.

    This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive.

    If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export.

    You can specify multiple account IDs per request.

    " }, "filters":{ "shape":"RDSDBRecommendationFilters", - "documentation":"

    An array of objects to specify a filter that exports a more specific set of Amazon RDS recommendations.

    " + "documentation":"

    An array of objects to specify a filter that exports a more specific set of Amazon Aurora and RDS recommendations.

    " }, "fieldsToExport":{ "shape":"ExportableRDSDBFields", @@ -2216,15 +2215,20 @@ "EngineVersion", "Idle", "MultiAZDBInstance", + "ClusterWriter", "CurrentDBInstanceClass", "CurrentStorageConfigurationStorageType", "CurrentStorageConfigurationAllocatedStorage", "CurrentStorageConfigurationMaxAllocatedStorage", "CurrentStorageConfigurationIOPS", "CurrentStorageConfigurationStorageThroughput", + "CurrentStorageEstimatedMonthlyVolumeIOPsCostVariation", "CurrentInstanceOnDemandHourlyPrice", "CurrentStorageOnDemandMonthlyPrice", "LookbackPeriodInDays", + "CurrentStorageEstimatedClusterInstanceOnDemandMonthlyCost", + "CurrentStorageEstimatedClusterStorageOnDemandMonthlyCost", + "CurrentStorageEstimatedClusterStorageIOOnDemandMonthlyCost", "CurrentInstancePerformanceRisk", "UtilizationMetricsCpuMaximum", "UtilizationMetricsMemoryMaximum", @@ -2244,6 +2248,9 @@ "UtilizationMetricsAuroraMemoryNumKillQueryTotalMaximum", "UtilizationMetricsReadIOPSEphemeralStorageMaximum", "UtilizationMetricsWriteIOPSEphemeralStorageMaximum", + "UtilizationMetricsVolumeBytesUsedAverage", + "UtilizationMetricsVolumeReadIOPsAverage", + "UtilizationMetricsVolumeWriteIOPsAverage", "InstanceFinding", "InstanceFindingReasonCodes", "StorageFinding", @@ -2258,6 +2265,7 @@ "StorageRecommendationOptionsIOPS", "StorageRecommendationOptionsStorageThroughput", "StorageRecommendationOptionsRank", + "StorageRecommendationOptionsEstimatedMonthlyVolumeIOPsCostVariation", "InstanceRecommendationOptionsInstanceOnDemandHourlyPrice", "InstanceRecommendationOptionsSavingsOpportunityPercentage", "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrency", @@ -2266,6 +2274,9 @@ "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts", "InstanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts", "StorageRecommendationOptionsOnDemandMonthlyPrice", + "StorageRecommendationOptionsEstimatedClusterInstanceOnDemandMonthlyCost", + 
"StorageRecommendationOptionsEstimatedClusterStorageOnDemandMonthlyCost", + "StorageRecommendationOptionsEstimatedClusterStorageIOOnDemandMonthlyCost", "StorageRecommendationOptionsSavingsOpportunityPercentage", "StorageRecommendationOptionsEstimatedMonthlySavingsCurrency", "StorageRecommendationOptionsEstimatedMonthlySavingsValue", @@ -2737,8 +2748,7 @@ }, "GetEnrollmentStatusRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetEnrollmentStatusResponse":{ "type":"structure", @@ -2933,7 +2943,7 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

    The ARN that identifies the Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " + "documentation":"

    The ARN that identifies the Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " }, "stat":{ "shape":"MetricStatistic", @@ -2968,23 +2978,23 @@ "members":{ "resourceArns":{ "shape":"ResourceArns", - "documentation":"

    The ARN that identifies the Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    The following is the format of a DB Cluster ARN:

    arn:aws:rds:{region}:{accountId}:cluster:{resourceName}

    " + "documentation":"

    The ARN that identifies the Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    The following is the format of a DB Cluster ARN:

    arn:aws:rds:{region}:{accountId}:cluster:{resourceName}

    " }, "nextToken":{ "shape":"NextToken", - "documentation":"

    The token to advance to the next page of Amazon RDS recommendations.

    " + "documentation":"

    The token to advance to the next page of Amazon Aurora and RDS database recommendations.

    " }, "maxResults":{ "shape":"MaxResults", - "documentation":"

    The maximum number of Amazon RDS recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " + "documentation":"

    The maximum number of Amazon Aurora and RDS database recommendations to return with a single request.

    To retrieve the remaining results, make another request with the returned nextToken value.

    " }, "filters":{ "shape":"RDSDBRecommendationFilters", - "documentation":"

    An array of objects to specify a filter that returns a more specific list of Amazon RDS recommendations.

    " + "documentation":"

    An array of objects to specify a filter that returns a more specific list of Amazon Aurora and RDS database recommendations.

    " }, "accountIds":{ "shape":"AccountIds", - "documentation":"

    Return the Amazon RDS recommendations to the specified Amazon Web Services account IDs.

    If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon RDS recommendations to specific member accounts.

    You can only specify one account ID per request.

    " + "documentation":"

    Return the Amazon Aurora and RDS database recommendations to the specified Amazon Web Services account IDs.

    If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon Aurora and RDS database recommendations to specific member accounts.

    You can only specify one account ID per request.

    " }, "recommendationPreferences":{"shape":"RecommendationPreferences"} } @@ -2994,11 +3004,11 @@ "members":{ "nextToken":{ "shape":"NextToken", - "documentation":"

    The token to advance to the next page of Amazon RDS recommendations.

    " + "documentation":"

    The token to advance to the next page of Amazon Aurora and RDS database recommendations.

    " }, "rdsDBRecommendations":{ "shape":"RDSDBRecommendations", - "documentation":"

    An array of objects that describe the Amazon RDS recommendations.

    " + "documentation":"

    An array of objects that describe the Amazon Aurora and RDS database recommendations.

    " }, "errors":{ "shape":"GetRecommendationErrors", @@ -4431,8 +4441,7 @@ }, "PutRecommendationPreferencesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RDSCurrentInstancePerformanceRisk":{ "type":"string", @@ -4448,27 +4457,27 @@ "members":{ "dbInstanceClass":{ "shape":"DBInstanceClass", - "documentation":"

    Describes the DB instance class recommendation option for your Amazon RDS instance.

    " + "documentation":"

    Describes the DB instance class recommendation option for your Amazon Aurora or RDS database.

    " }, "projectedUtilizationMetrics":{ "shape":"RDSDBProjectedUtilizationMetrics", - "documentation":"

    An array of objects that describe the projected utilization metrics of the RDS instance recommendation option.

    " + "documentation":"

    An array of objects that describe the projected utilization metrics of the DB instance recommendation option.

    " }, "performanceRisk":{ "shape":"PerformanceRisk", - "documentation":"

    The performance risk of the RDS instance recommendation option.

    " + "documentation":"

    The performance risk of the DB instance recommendation option.

    " }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS instance recommendation option.

    " + "documentation":"

    The rank identifier of the DB instance recommendation option.

    " }, "savingsOpportunity":{"shape":"SavingsOpportunity"}, "savingsOpportunityAfterDiscounts":{ "shape":"RDSInstanceSavingsOpportunityAfterDiscounts", - "documentation":"

    Describes the savings opportunity for Amazon RDS recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for Amazon Aurora and RDS database recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " } }, - "documentation":"

    Describes the recommendation options for an Amazon RDS instance.

    " + "documentation":"

    Describes the recommendation options for a DB instance.

    " }, "RDSDBInstanceRecommendationOptions":{ "type":"list", @@ -4494,7 +4503,10 @@ "AuroraMemoryNumKillConnTotal", "AuroraMemoryNumKillQueryTotal", "ReadIOPSEphemeralStorage", - "WriteIOPSEphemeralStorage" + "WriteIOPSEphemeralStorage", + "VolumeReadIOPs", + "VolumeBytesUsed", + "VolumeWriteIOPs" ] }, "RDSDBMetricStatistic":{ @@ -4514,15 +4526,15 @@ "members":{ "resourceArn":{ "shape":"ResourceArn", - "documentation":"

    The ARN of the current Amazon RDS.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " + "documentation":"

    The ARN of the current Amazon Aurora or RDS database.

    The following is the format of the ARN:

    arn:aws:rds:{region}:{accountId}:db:{resourceName}

    " }, "accountId":{ "shape":"AccountId", - "documentation":"

    The Amazon Web Services account ID of the Amazon RDS.

    " + "documentation":"

    The Amazon Web Services account ID of the Amazon Aurora or RDS database.

    " }, "engine":{ "shape":"Engine", - "documentation":"

    The engine of the RDS instance.

    " + "documentation":"

    The engine of the DB instance.

    " }, "engineVersion":{ "shape":"EngineVersion", @@ -4534,11 +4546,11 @@ }, "currentDBInstanceClass":{ "shape":"CurrentDBInstanceClass", - "documentation":"

    The DB instance class of the current RDS instance.

    " + "documentation":"

    The DB instance class of the current Aurora or RDS DB instance.

    " }, "currentStorageConfiguration":{ "shape":"DBStorageConfiguration", - "documentation":"

    The configuration of the current RDS storage.

    " + "documentation":"

    The configuration of the current DB storage.

    " }, "dbClusterIdentifier":{ "shape":"DBClusterIdentifier", @@ -4546,72 +4558,76 @@ }, "idle":{ "shape":"Idle", - "documentation":"

    This indicates if the RDS instance is idle or not.

    " + "documentation":"

    This indicates if the DB instance is idle or not.

    " }, "instanceFinding":{ "shape":"RDSInstanceFinding", - "documentation":"

    The finding classification of an Amazon RDS instance.

    Findings for Amazon RDS instance include:

    • Underprovisioned — When Compute Optimizer detects that there’s not enough resource specifications, an Amazon RDS is considered under-provisioned.

    • Overprovisioned — When Compute Optimizer detects that there’s excessive resource specifications, an Amazon RDS is considered over-provisioned.

    • Optimized — When the specifications of your Amazon RDS instance meet the performance requirements of your workload, the service is considered optimized.

    " + "documentation":"

    The finding classification of an Amazon Aurora and RDS DB instance.

    For more information about finding classifications, see Finding classifications for Aurora and RDS databases in the Compute Optimizer User Guide.

    " }, "storageFinding":{ "shape":"RDSStorageFinding", - "documentation":"

    The finding classification of Amazon RDS storage.

    Findings for Amazon RDS instance include:

    • Underprovisioned — When Compute Optimizer detects that there’s not enough storage, an Amazon RDS is considered under-provisioned.

    • Overprovisioned — When Compute Optimizer detects that there’s excessive storage, an Amazon RDS is considered over-provisioned.

    • Optimized — When the storage of your Amazon RDS meet the performance requirements of your workload, the service is considered optimized.

    " + "documentation":"

    The finding classification of Amazon RDS DB instance storage.

    For more information about finding classifications, see Finding classifications for Aurora and RDS databases in the Compute Optimizer User Guide.

    " }, "instanceFindingReasonCodes":{ "shape":"RDSInstanceFindingReasonCodes", - "documentation":"

    The reason for the finding classification of an Amazon RDS instance.

    " + "documentation":"

    The reason for the finding classification of a DB instance.

    " }, "currentInstancePerformanceRisk":{ "shape":"RDSCurrentInstancePerformanceRisk", "documentation":"

    The performance risk for the current DB instance.

    " }, + "currentStorageEstimatedMonthlyVolumeIOPsCostVariation":{ + "shape":"RDSEstimatedMonthlyVolumeIOPsCostVariation", + "documentation":"

    The level of variation in monthly I/O costs for the current DB storage configuration.

    " + }, "storageFindingReasonCodes":{ "shape":"RDSStorageFindingReasonCodes", - "documentation":"

    The reason for the finding classification of Amazon RDS storage.

    " + "documentation":"

    The reason for the finding classification of RDS DB instance storage.

    " }, "instanceRecommendationOptions":{ "shape":"RDSDBInstanceRecommendationOptions", - "documentation":"

    An array of objects that describe the recommendation options for the Amazon RDS instance.

    " + "documentation":"

    An array of objects that describe the recommendation options for the RDS DB instance.

    " }, "storageRecommendationOptions":{ "shape":"RDSDBStorageRecommendationOptions", - "documentation":"

    An array of objects that describe the recommendation options for Amazon RDS storage.

    " + "documentation":"

    An array of objects that describe the recommendation options for DB instance storage.

    " }, "utilizationMetrics":{ "shape":"RDSDBUtilizationMetrics", - "documentation":"

    An array of objects that describe the utilization metrics of the Amazon RDS.

    " + "documentation":"

    An array of objects that describe the utilization metrics of the DB instance.

    " }, "effectiveRecommendationPreferences":{ "shape":"RDSEffectiveRecommendationPreferences", - "documentation":"

    Describes the effective recommendation preferences for Amazon RDS.

    " + "documentation":"

    Describes the effective recommendation preferences for DB instances.

    " }, "lookbackPeriodInDays":{ "shape":"LookBackPeriodInDays", - "documentation":"

    The number of days the Amazon RDS utilization metrics were analyzed.

    " + "documentation":"

    The number of days the DB instance utilization metrics were analyzed.

    " }, "lastRefreshTimestamp":{ "shape":"LastRefreshTimestamp", - "documentation":"

    The timestamp of when the Amazon RDS recommendation was last generated.

    " + "documentation":"

    The timestamp of when the DB instance recommendation was last generated.

    " }, "tags":{ "shape":"Tags", - "documentation":"

    A list of tags assigned to your Amazon RDS recommendations.

    " + "documentation":"

    A list of tags assigned to your DB instance recommendations.

    " } }, - "documentation":"

    Describes an Amazon RDS recommendation.

    " + "documentation":"

    Describes an Amazon Aurora and RDS database recommendation.

    " }, "RDSDBRecommendationFilter":{ "type":"structure", "members":{ "name":{ "shape":"RDSDBRecommendationFilterName", - "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    You can filter your Amazon RDS recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your Amazon RDS recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all Amazon RDS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your Amazon RDS recommendations. Use this filter to find all of your Amazon RDS recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon RDS service recommendations with a tag key value of Owner or without any tag keys assigned.

    " + "documentation":"

    The name of the filter.

    Specify Finding to return recommendations with a specific finding classification.

    You can filter your DB instance recommendations by tag:key and tag-key tags.

    A tag:key is a key and value combination of a tag assigned to your DB instance recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all DB instance recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    A tag-key is the key of a tag assigned to your DB instance recommendations. Use this filter to find all of your DB instance recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your DB instance recommendations with a tag key value of Owner or without any tag keys assigned.

    " }, "values":{ "shape":"FilterValues", "documentation":"

    The value of the filter.

    " } }, - "documentation":"

    Describes a filter that returns a more specific list of Amazon RDS recommendations. Use this filter with the GetECSServiceRecommendations action.

    " + "documentation":"

    Describes a filter that returns a more specific list of DB instance recommendations. Use this filter with the GetRDSDatabaseRecommendations action.

    " }, "RDSDBRecommendationFilterName":{ "type":"string", @@ -4640,15 +4656,19 @@ }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS storage recommendation option.

    " + "documentation":"

    The rank identifier of the DB storage recommendation option.

    " }, "savingsOpportunity":{"shape":"SavingsOpportunity"}, "savingsOpportunityAfterDiscounts":{ "shape":"RDSStorageSavingsOpportunityAfterDiscounts", - "documentation":"

    Describes the savings opportunity for Amazon RDS storage recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for DB storage recommendations or for the recommendation option.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + }, + "estimatedMonthlyVolumeIOPsCostVariation":{ + "shape":"RDSEstimatedMonthlyVolumeIOPsCostVariation", + "documentation":"

    The projected level of variation in monthly I/O costs for the DB storage recommendation option.

    " } }, - "documentation":"

    Describes the recommendation options for Amazon RDS storage.

    " + "documentation":"

    Describes the recommendation options for DB storage.

    " }, "RDSDBStorageRecommendationOptions":{ "type":"list", @@ -4670,7 +4690,7 @@ "documentation":"

    The value of the utilization metric.

    " } }, - "documentation":"

    Describes the utilization metric of an Amazon RDS.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the utilization metric data of your service against its projected utilization metric data.

    " + "documentation":"

    Describes the utilization metric of an Amazon Aurora and RDS database.

    To determine the performance difference between your current DB instance and the recommended option, compare the utilization metric data of your service against its projected utilization metric data.

    " }, "RDSDBUtilizationMetrics":{ "type":"list", @@ -4692,7 +4712,7 @@ "documentation":"

    The values for the projected metric.

    " } }, - "documentation":"

    Describes the projected metrics of an Amazon RDS recommendation option.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

    " + "documentation":"

    Describes the projected metrics of an Amazon Aurora and RDS database recommendation option.

    To determine the performance difference between your current Amazon Aurora and RDS database and the recommended option, compare the metric data of your service against its projected metric data.

    " }, "RDSDatabaseProjectedMetrics":{ "type":"list", @@ -4703,18 +4723,18 @@ "members":{ "recommendedDBInstanceClass":{ "shape":"RecommendedDBInstanceClass", - "documentation":"

    The recommended DB instance class for the Amazon RDS.

    " + "documentation":"

    The recommended DB instance class for the Amazon Aurora or RDS database.

    " }, "rank":{ "shape":"Rank", - "documentation":"

    The rank identifier of the RDS instance recommendation option.

    " + "documentation":"

    The rank identifier of the Amazon Aurora or RDS DB instance recommendation option.

    " }, "projectedMetrics":{ "shape":"RDSDatabaseProjectedMetrics", "documentation":"

    An array of objects that describe the projected metric.

    " } }, - "documentation":"

    Describes the projected metrics of an Amazon RDS recommendation option.

    To determine the performance difference between your current Amazon RDS and the recommended option, compare the metric data of your service against its projected metric data.

    " + "documentation":"

    Describes the projected metrics of an Amazon Aurora and RDS database recommendation option.

    To determine the performance difference between your current Amazon Aurora and RDS database and the recommended option, compare the metric data of your service against its projected metric data.

    " }, "RDSDatabaseRecommendedOptionProjectedMetrics":{ "type":"list", @@ -4725,7 +4745,7 @@ "members":{ "cpuVendorArchitectures":{ "shape":"CpuVendorArchitectures", - "documentation":"

    Describes the CPU vendor and architecture for Amazon RDS recommendations.

    " + "documentation":"

    Describes the CPU vendor and architecture for DB instance recommendations.

    " }, "enhancedInfrastructureMetrics":{ "shape":"EnhancedInfrastructureMetrics", @@ -4733,14 +4753,23 @@ }, "lookBackPeriod":{ "shape":"LookBackPeriodPreference", - "documentation":"

    The number of days the utilization metrics of the Amazon RDS are analyzed.

    " + "documentation":"

    The number of days the utilization metrics of the DB instance are analyzed.

    " }, "savingsEstimationMode":{ "shape":"RDSSavingsEstimationMode", - "documentation":"

    Describes the savings estimation mode preference applied for calculating savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the savings estimation mode preference applied for calculating savings opportunity for DB instances.

    " } }, - "documentation":"

    Describes the effective recommendation preferences for Amazon RDS.

    " + "documentation":"

    Describes the effective recommendation preferences for Amazon Aurora and RDS databases.

    " + }, + "RDSEstimatedMonthlyVolumeIOPsCostVariation":{ + "type":"string", + "enum":[ + "None", + "Low", + "Medium", + "High" + ] }, "RDSInstanceEstimatedMonthlySavings":{ "type":"structure", @@ -4751,10 +4780,10 @@ }, "value":{ "shape":"Value", - "documentation":"

    The value of the estimated monthly savings for Amazon RDS instances.

    " + "documentation":"

    The value of the estimated monthly savings for DB instances.

    " } }, - "documentation":"

    Describes the estimated monthly savings possible for Amazon RDS instances by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

    " + "documentation":"

    Describes the estimated monthly savings possible for DB instances by adopting Compute Optimizer recommendations. This is based on DB instance pricing after applying Savings Plans discounts.

    " }, "RDSInstanceFinding":{ "type":"string", @@ -4792,24 +4821,24 @@ "members":{ "savingsOpportunityPercentage":{ "shape":"SavingsOpportunityPercentage", - "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s DB instance recommendations. This includes any applicable Savings Plans discounts.

    " }, "estimatedMonthlySavings":{ "shape":"RDSInstanceEstimatedMonthlySavings", - "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s DB instance recommendations. This includes any applicable Savings Plans discounts.

    " } }, - "documentation":"

    Describes the savings opportunity for Amazon RDS instance recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " + "documentation":"

    Describes the savings opportunity for DB instance recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " }, "RDSSavingsEstimationMode":{ "type":"structure", "members":{ "source":{ "shape":"RDSSavingsEstimationModeSource", - "documentation":"

    Describes the source for calculating the savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the source for calculating the savings opportunity for DB instances.

    " } }, - "documentation":"

    Describes the savings estimation mode used for calculating savings opportunity for Amazon RDS.

    " + "documentation":"

    Describes the savings estimation mode used for calculating savings opportunity for DB instances.

    " }, "RDSSavingsEstimationModeSource":{ "type":"string", @@ -4828,17 +4857,18 @@ }, "value":{ "shape":"Value", - "documentation":"

    The value of the estimated monthly savings for Amazon RDS storage.

    " + "documentation":"

    The value of the estimated monthly savings for DB instance storage.

    " } }, - "documentation":"

    Describes the estimated monthly savings possible for Amazon RDS storage by adopting Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.

    " + "documentation":"

    Describes the estimated monthly savings possible for DB instance storage by adopting Compute Optimizer recommendations. This is based on DB instance pricing after applying Savings Plans discounts.

    " }, "RDSStorageFinding":{ "type":"string", "enum":[ "Optimized", "Underprovisioned", - "Overprovisioned" + "Overprovisioned", + "NotOptimized" ] }, "RDSStorageFindingReasonCode":{ @@ -4848,7 +4878,9 @@ "EBSVolumeThroughputUnderprovisioned", "EBSVolumeIOPSOverprovisioned", "EBSVolumeThroughputOverprovisioned", - "NewGenerationStorageTypeAvailable" + "NewGenerationStorageTypeAvailable", + "DBClusterStorageOptionAvailable", + "DBClusterStorageSavingsAvailable" ] }, "RDSStorageFindingReasonCodes":{ @@ -4860,11 +4892,11 @@ "members":{ "savingsOpportunityPercentage":{ "shape":"SavingsOpportunityPercentage", - "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s DB instance storage recommendations. This includes any applicable Savings Plans discounts.

    " }, "estimatedMonthlySavings":{ "shape":"RDSStorageEstimatedMonthlySavings", - "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.

    " + "documentation":"

    The estimated monthly savings possible by adopting Compute Optimizer’s DB instance storage recommendations. This includes any applicable Savings Plans discounts.

    " } }, "documentation":"

    Describes the savings opportunity for Amazon RDS storage recommendations after applying Savings Plans discounts.

    Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation.

    " @@ -5026,7 +5058,8 @@ "EcsService", "License", "RdsDBInstance", - "RdsDBInstanceStorage" + "RdsDBInstanceStorage", + "AuroraDBClusterStorage" ] }, "RecommendationSources":{ @@ -5123,6 +5156,7 @@ "EcsService", "License", "RdsDBInstance", + "AuroraDBClusterStorage", "Idle" ] }, diff --git a/services/config/pom.xml b/services/config/pom.xml index 94f44fed538b..9c72e904465a 100644 --- a/services/config/pom.xml +++ b/services/config/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT config AWS Java SDK :: Services :: AWS Config diff --git a/services/config/src/main/resources/codegen-resources/customization.config b/services/config/src/main/resources/codegen-resources/customization.config index dc7929b1369b..f126840772d3 100644 --- a/services/config/src/main/resources/codegen-resources/customization.config +++ b/services/config/src/main/resources/codegen-resources/customization.config @@ -19,6 +19,5 @@ "excludedSimpleMethods": [ "startConfigRulesEvaluation" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/config/src/main/resources/codegen-resources/service-2.json b/services/config/src/main/resources/codegen-resources/service-2.json index 02cf3e9e119d..68960fac84df 100644 --- a/services/config/src/main/resources/codegen-resources/service-2.json +++ b/services/config/src/main/resources/codegen-resources/service-2.json @@ -2755,8 +2755,7 @@ "ConfigurationStateId":{"type":"string"}, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For PutServiceLinkedConfigurationRecorder, you cannot create a service-linked recorder because a service-linked recorder already exists for the specified service.

    For DeleteServiceLinkedConfigurationRecorder, you cannot delete the service-linked recorder because it is currently in use by the linked Amazon Web Services service.

    For DeleteDeliveryChannel, you cannot delete the specified delivery channel because the customer managed configuration recorder is running. Use the StopConfigurationRecorder operation to stop the customer managed configuration recorder.

    For AssociateResourceTypes and DisassociateResourceTypes, one of the following errors:

    • For service-linked configuration recorders, the configuration recorder is not in use by the service. No association or dissociation of resource types is permitted.

    • For service-linked configuration recorders, your requested change to the configuration recorder has been denied by its linked Amazon Web Services service.

    ", "exception":true }, @@ -3112,8 +3111,7 @@ }, "ConformancePackTemplateValidationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a template that is not valid or supported.

    ", "exception":true }, @@ -3240,8 +3238,7 @@ }, "DeleteEvaluationResultsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output when you delete the evaluation results for the specified Config rule.

    " }, "DeleteOrganizationConfigRuleRequest":{ @@ -3297,8 +3294,7 @@ }, "DeleteRemediationConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRemediationExceptionsRequest":{ "type":"structure", @@ -3392,8 +3388,7 @@ }, "DeleteStoredQueryResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeliverConfigSnapshotRequest":{ "type":"structure", @@ -5307,114 +5302,98 @@ "IncludeGlobalResourceTypes":{"type":"boolean"}, "InsufficientDeliveryPolicyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Your Amazon S3 bucket policy does not allow Config to write to it.

    ", "exception":true }, "InsufficientPermissionsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Indicates one of the following errors:

    • For PutConfigRule, the rule cannot be created because the IAM role assigned to Config lacks permissions to perform the config:Put* action.

    • For PutConfigRule, the Lambda function cannot be invoked. Check the function ARN, and check the function's permissions.

    • For PutOrganizationConfigRule, organization Config rule cannot be created because you do not have permissions to call IAM GetRole action or create a service-linked role.

    • For PutConformancePack and PutOrganizationConformancePack, a conformance pack cannot be created because you do not have the following permissions:

      • You do not have permission to call IAM GetRole action or create a service-linked role.

      • You do not have permission to read Amazon S3 bucket or call SSM:GetDocument.

    • For PutServiceLinkedConfigurationRecorder, a service-linked configuration recorder cannot be created because you do not have the following permissions: IAM CreateServiceLinkedRole.

    ", "exception":true }, "Integer":{"type":"integer"}, "InvalidConfigurationRecorderNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have provided a name for the customer managed configuration recorder that is not valid.

    ", "exception":true }, "InvalidDeliveryChannelNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified delivery channel name is not valid.

    ", "exception":true }, "InvalidExpressionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The syntax of the query is incorrect.

    ", "exception":true }, "InvalidLimitException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified limit is outside the allowable range.

    ", "exception":true }, "InvalidNextTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified next token is not valid. Specify the nextToken string that was returned in the previous response to get the next page of results.

    ", "exception":true }, "InvalidParameterValueException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    One or more of the specified parameters are not valid. Verify that your parameters are valid and try again.

    ", "exception":true }, "InvalidRecordingGroupException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    One of the following errors:

    • You have provided a combination of parameter values that is not valid. For example:

    • Every parameter is either null, false, or empty.

    • You have reached the limit of the number of resource types you can provide for the recording group.

    • You have provided resource types or a recording strategy that are not valid.

    ", "exception":true }, "InvalidResultTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified ResultToken is not valid.

    ", "exception":true }, "InvalidRoleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have provided a null or empty Amazon Resource Name (ARN) for the IAM role assumed by Config and used by the customer managed configuration recorder.

    ", "exception":true }, "InvalidS3KeyPrefixException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified Amazon S3 key prefix is not valid.

    ", "exception":true }, "InvalidS3KmsKeyArnException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified Amazon KMS Key ARN is not valid.

    ", "exception":true }, "InvalidSNSTopicARNException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified Amazon SNS topic does not exist.

    ", "exception":true }, "InvalidTimeRangeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified time range is not valid. The earlier time is not chronologically before the later time.

    ", "exception":true }, "LastDeliveryChannelDeleteFailedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You cannot delete the delivery channel you specified because the customer managed configuration recorder is running.

    ", "exception":true }, @@ -5427,8 +5406,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For PutServiceLinkedConfigurationRecorder API, this exception is thrown if the number of service-linked roles in the account exceeds the limit.

    For StartConfigRulesEvaluation API, this exception is thrown if an evaluation is in progress or if you call the StartConfigRulesEvaluation API more than once per minute.

    For PutConfigurationAggregator API, this exception is thrown if the number of accounts and aggregators exceeds the limit.

    ", "exception":true }, @@ -5687,57 +5665,49 @@ "Long":{"type":"long"}, "MaxActiveResourcesExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of active custom resource types in your account. There is a limit of 100,000. Delete unused resources using DeleteResourceConfig.

    ", "exception":true }, "MaxNumberOfConfigRulesExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Failed to add the Config rule because the account already contains the maximum number of 1000 rules. Consider deleting any deactivated rules before you add new rules.

    ", "exception":true }, "MaxNumberOfConfigurationRecordersExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of configuration recorders you can create.

    ", "exception":true }, "MaxNumberOfConformancePacksExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

    ", "exception":true }, "MaxNumberOfDeliveryChannelsExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of delivery channels you can create.

    ", "exception":true }, "MaxNumberOfOrganizationConfigRulesExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of organization Config rules you can create. For more information, see Service Limits in the Config Developer Guide.

    ", "exception":true }, "MaxNumberOfOrganizationConformancePacksExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of organization conformance packs you can create in an account. For more information, see Service Limits in the Config Developer Guide.

    ", "exception":true }, "MaxNumberOfRetentionConfigurationsExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Failed to add the retention configuration because a retention configuration with that name already exists.

    ", "exception":true }, @@ -5818,121 +5788,104 @@ "NextToken":{"type":"string"}, "NoAvailableConfigurationRecorderException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There are no customer managed configuration recorders available to record your resources. Use the PutConfigurationRecorder operation to create the customer managed configuration recorder.

    ", "exception":true }, "NoAvailableDeliveryChannelException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is no delivery channel available to record configurations.

    ", "exception":true }, "NoAvailableOrganizationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Organization is no longer available.

    ", "exception":true }, "NoRunningConfigurationRecorderException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is no configuration recorder running.

    ", "exception":true }, "NoSuchBucketException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified Amazon S3 bucket does not exist.

    ", "exception":true }, "NoSuchConfigRuleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The Config rule in the request is not valid. Verify that the rule is a Config Process Check rule, that the rule name is correct, and that valid Amazon Resource Names (ARNs) are used before trying again.

    ", "exception":true }, "NoSuchConfigRuleInConformancePackException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Config rule that you passed in the filter does not exist.

    ", "exception":true }, "NoSuchConfigurationAggregatorException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a configuration aggregator that does not exist.

    ", "exception":true }, "NoSuchConfigurationRecorderException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a configuration recorder that does not exist.

    ", "exception":true }, "NoSuchConformancePackException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You specified one or more conformance packs that do not exist.

    ", "exception":true }, "NoSuchDeliveryChannelException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a delivery channel that does not exist.

    ", "exception":true }, "NoSuchOrganizationConfigRuleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The Config rule in the request is not valid. Verify that the rule is an organization Config Process Check rule, that the rule name is correct, and that valid Amazon Resource Names (ARNs) are used before trying again.

    ", "exception":true }, "NoSuchOrganizationConformancePackException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Config organization conformance pack that you passed in the filter does not exist.

    For DeleteOrganizationConformancePack, you tried to delete an organization conformance pack that does not exist.

    ", "exception":true }, "NoSuchRemediationConfigurationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You specified a Config rule without a remediation configuration.

    ", "exception":true }, "NoSuchRemediationExceptionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You tried to delete a remediation exception that does not exist.

    ", "exception":true }, "NoSuchRetentionConfigurationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a retention configuration that does not exist.

    ", "exception":true }, "OrderingTimestamp":{"type":"timestamp"}, "OrganizationAccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    For PutConfigurationAggregator API, you can see this exception for the following reasons:

    • No permission to call EnableAWSServiceAccess API

    • The configuration aggregator cannot be updated because your Amazon Web Services Organization management account or the delegated administrator role changed. Delete this aggregator and create a new one with the current Amazon Web Services Organization.

    • The configuration aggregator is associated with a previous Amazon Web Services Organization and Config cannot aggregate data with current Amazon Web Services Organization. Delete this aggregator and create a new one with the current Amazon Web Services Organization.

    • You are not a registered delegated administrator for Config with permissions to call ListDelegatedAdministrators API. Ensure that the management account registers delegated administrator for Config service principal name before the delegated administrator creates an aggregator.

    For all OrganizationConfigRule and OrganizationConformancePack APIs, Config throws an exception if APIs are called from member accounts. All APIs must be called from organization management account.

    ", "exception":true }, @@ -5957,8 +5910,7 @@ }, "OrganizationAllFeaturesNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Config resource cannot be created because your organization does not have all features enabled.

    ", "exception":true }, @@ -6203,8 +6155,7 @@ }, "OrganizationConformancePackTemplateValidationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a template that is not valid or supported.

    ", "exception":true }, @@ -6455,8 +6406,7 @@ }, "OversizedConfigurationItemException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The configuration item size is outside the allowable range.

    ", "exception":true }, @@ -6714,8 +6664,7 @@ }, "PutExternalEvaluationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "PutOrganizationConfigRuleRequest":{ "type":"structure", @@ -7347,8 +7296,7 @@ }, "RemediationInProgressException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    Remediation action is in progress. You can either cancel execution in Amazon Web Services Systems Manager or wait and try again later.

    ", "exception":true }, @@ -7585,8 +7533,7 @@ }, "ResourceInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You see this exception in the following cases:

    • For DeleteConfigRule, Config is deleting this rule. Try your request again later.

    • For DeleteConfigRule, the rule is deleting your evaluation results. Try your request again later.

    • For DeleteConfigRule, a remediation action is associated with the rule and Config cannot delete this rule. Delete the remediation action associated with the rule before deleting the rule and try your request again later.

    • For PutConfigOrganizationRule, organization Config rule deletion is in progress. Try your request again later.

    • For DeleteOrganizationConfigRule, organization Config rule creation is in progress. Try your request again later.

    • For PutConformancePack and PutOrganizationConformancePack, a conformance pack creation, update, and deletion is in progress. Try your request again later.

    • For DeleteConformancePack, a conformance pack creation, update, and deletion is in progress. Try your request again later.

    ", "exception":true }, @@ -7617,15 +7564,13 @@ "ResourceName":{"type":"string"}, "ResourceNotDiscoveredException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a resource that is either unknown or has not been discovered.

    ", "exception":true }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have specified a resource that does not exist.

    ", "exception":true }, @@ -8038,7 +7983,42 @@ "AWS::RDS::OptionGroup", "AWS::Redshift::EndpointAccess", "AWS::Route53Resolver::FirewallRuleGroup", - "AWS::SSM::Document" + "AWS::SSM::Document", + "AWS::AppConfig::ExtensionAssociation", + "AWS::AppIntegrations::Application", + "AWS::AppSync::ApiCache", + "AWS::Bedrock::Guardrail", + "AWS::Bedrock::KnowledgeBase", + "AWS::Cognito::IdentityPool", + "AWS::Connect::Rule", + "AWS::Connect::User", + "AWS::EC2::ClientVpnTargetNetworkAssociation", + "AWS::EC2::EIPAssociation", + "AWS::EC2::IPAMResourceDiscovery", + "AWS::EC2::IPAMResourceDiscoveryAssociation", + "AWS::EC2::InstanceConnectEndpoint", + "AWS::EC2::SnapshotBlockPublicAccess", + "AWS::EC2::VPCBlockPublicAccessExclusion", + "AWS::EC2::VPCBlockPublicAccessOptions", + "AWS::EC2::VPCEndpointConnectionNotification", + "AWS::EC2::VPNConnectionRoute", + "AWS::Evidently::Segment", + "AWS::IAM::OIDCProvider", + "AWS::InspectorV2::Activation", + "AWS::MSK::ClusterPolicy", + "AWS::MSK::VpcConnection", + "AWS::MediaConnect::Gateway", + "AWS::MemoryDB::SubnetGroup", + "AWS::OpenSearchServerless::Collection", + "AWS::OpenSearchServerless::VpcEndpoint", + "AWS::Redshift::EndpointAuthorization", + "AWS::Route53Profiles::Profile", + "AWS::S3::StorageLensGroup", + "AWS::S3Express::BucketPolicy", + "AWS::S3Express::DirectoryBucket", + "AWS::SageMaker::InferenceExperiment", + "AWS::SecurityHub::Standard", + "AWS::Transfer::Profile" ] }, "ResourceTypeList":{ @@ -8354,8 +8334,7 @@ }, "StartConfigRulesEvaluationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The output when you start the evaluation for the specified Config rule.

    " }, "StartConfigurationRecorderRequest":{ @@ -8690,15 +8669,13 @@ }, "TooManyTagsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You have reached the limit of the number of tags you can use. For more information, see Service Limits in the Config Developer Guide.

    ", "exception":true }, "UnmodifiableEntityException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The requested operation is not valid.

    For PutConfigurationRecorder, you will see this exception because you cannot use this operation to create a service-linked configuration recorder. Use the PutServiceLinkedConfigurationRecorder operation to create a service-linked configuration recorder.

    For DeleteConfigurationRecorder, you will see this exception because you cannot use this operation to delete a service-linked configuration recorder. Use the DeleteServiceLinkedConfigurationRecorder operation to delete a service-linked configuration recorder.

    For StartConfigurationRecorder and StopConfigurationRecorder, you will see this exception because these operations do not affect service-linked configuration recorders. Service-linked configuration recorders are always recording. To stop recording, you must delete the service-linked configuration recorder. Use the DeleteServiceLinkedConfigurationRecorder operation to delete a service-linked configuration recorder.

    ", "exception":true }, @@ -8725,8 +8702,7 @@ }, "ValidationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The requested operation is not valid. You will see this exception if there are missing required fields or if the input value fails the validation.

    For PutStoredQuery, one of the following errors:

    • There are missing required fields.

    • The input value fails the validation.

    • You are trying to create more than 300 queries.

    For DescribeConfigurationRecorders and DescribeConfigurationRecorderStatus, one of the following errors:

    • You have specified more than one configuration recorder.

    • You have provided a service principal for service-linked configuration recorder that is not valid.

    For AssociateResourceTypes and DisassociateResourceTypes, one of the following errors:

    • Your configuration recorder has a recording strategy that does not allow the association or disassociation of resource types.

    • One or more of the specified resource types are already associated or disassociated with the configuration recorder.

    • For service-linked configuration recorders, the configuration recorder does not record one or more of the specified resource types.

    ", "exception":true }, diff --git a/services/connect/pom.xml b/services/connect/pom.xml index 28025e5987af..8bd2265e73f8 100644 --- a/services/connect/pom.xml +++ b/services/connect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connect AWS Java SDK :: Services :: Connect diff --git a/services/connect/src/main/resources/codegen-resources/customization.config b/services/connect/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/connect/src/main/resources/codegen-resources/customization.config +++ b/services/connect/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/connect/src/main/resources/codegen-resources/service-2.json b/services/connect/src/main/resources/codegen-resources/service-2.json index 7dc7ff34ca6c..2e33e442286e 100644 --- a/services/connect/src/main/resources/codegen-resources/service-2.json +++ b/services/connect/src/main/resources/codegen-resources/service-2.json @@ -438,7 +438,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Only the EMAIL and VOICE channels are supported. The supported initiation methods for EMAIL are: OUTBOUND, AGENT_REPLY, and FLOW. For VOICE the supported initiation methods are TRANSFER and the subtype connect:ExternalAudio.

    Creates a new EMAIL or VOICE contact.

    " + "documentation":"

    Only the VOICE, EMAIL, and TASK channels are supported.

    • For VOICE: The supported initiation method is TRANSFER. The contacts created with this initiation method have a subtype connect:ExternalAudio.

    • For EMAIL: The supported initiation methods are OUTBOUND, AGENT_REPLY, and FLOW.

    • For TASK: The supported initiation method is API. Contacts created with this API have a sub-type of connect:ExternalTask.

    Creates a new VOICE, EMAIL, or TASK contact.

    After a contact is created, you can move it to the desired state by using the InitiateAs parameter. While you can use API to create task contacts that are in the COMPLETED state, you must contact Amazon Web Services Support before using it for bulk import use cases. Bulk import causes your requests to be throttled or fail if your CreateContact limits aren't high enough.

    " }, "CreateContactFlow":{ "name":"CreateContactFlow", @@ -1748,7 +1748,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Describes the specified routing profile.

    " + "documentation":"

    Describes the specified routing profile.

    DescribeRoutingProfile does not populate AssociatedQueueIds in its response. The example Response Syntax shown on this page is incorrect; we are working to update it. SearchRoutingProfiles does include AssociatedQueueIds.

    " }, "DescribeRule":{ "name":"DescribeRule", @@ -3533,7 +3533,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Searches routing profiles in an Amazon Connect instance, with optional filtering.

    " + "documentation":"

    Searches routing profiles in an Amazon Connect instance, with optional filtering.

    SearchRoutingProfiles does not populate LastModifiedRegion, LastModifiedTime, MediaConcurrencies.CrossChannelBehavior, and AgentAvailabilityTimer in its response, but DescribeRoutingProfile does.

    " }, "SearchSecurityProfiles":{ "name":"SearchSecurityProfiles", @@ -6674,6 +6674,44 @@ "member":{"shape":"Channel"}, "max":4 }, + "ChatContactMetrics":{ + "type":"structure", + "members":{ + "MultiParty":{ + "shape":"NullableBoolean", + "documentation":"

    A boolean flag indicating whether multiparty chat or supervisor barge were enabled on this contact.

    " + }, + "TotalMessages":{ + "shape":"Count", + "documentation":"

    The number of chat messages on the contact.

    " + }, + "TotalBotMessages":{ + "shape":"Count", + "documentation":"

    The total number of bot and automated messages on a chat contact.

    " + }, + "TotalBotMessageLengthInChars":{ + "shape":"Count", + "documentation":"

    The total number of characters from bot and automated messages on a chat contact.

    " + }, + "ConversationCloseTimeInMillis":{ + "shape":"DurationMillis", + "documentation":"

    The time it took for a contact to end after the last customer message.

    " + }, + "ConversationTurnCount":{ + "shape":"Count", + "documentation":"

    The number of conversation turns in a chat contact, which represents the back-and-forth exchanges between customer and other participants.

    " + }, + "AgentFirstResponseTimestamp":{ + "shape":"timestamp", + "documentation":"

    The agent first response timestamp for a chat contact.

    " + }, + "AgentFirstResponseTimeInMillis":{ + "shape":"DurationMillis", + "documentation":"

    The time for an agent to respond after obtaining a chat contact.

    " + } + }, + "documentation":"

    Information about the overall participant interactions at the contact level.

    " + }, "ChatContent":{ "type":"string", "max":16384, @@ -6734,6 +6772,24 @@ }, "documentation":"

    A chat message.

    " }, + "ChatMetrics":{ + "type":"structure", + "members":{ + "ChatContactMetrics":{ + "shape":"ChatContactMetrics", + "documentation":"

    Information about the overall participant interactions at the contact level.

    " + }, + "AgentMetrics":{ + "shape":"ParticipantMetrics", + "documentation":"

    Information about agent interactions in a contact.

    " + }, + "CustomerMetrics":{ + "shape":"ParticipantMetrics", + "documentation":"

    Information about customer interactions in a contact.

    " + } + }, + "documentation":"

    Information about how agent, bot, and customer interact in a chat contact.

    " + }, "ChatParticipantRoleConfig":{ "type":"structure", "required":["ParticipantTimerConfigList"], @@ -7120,6 +7176,10 @@ "shape":"QualityMetrics", "documentation":"

    Information about the quality of the participant's media connection.

    " }, + "ChatMetrics":{ + "shape":"ChatMetrics", + "documentation":"

    Information about how agent, bot, and customer interact in a chat contact.

    " + }, "DisconnectDetails":{ "shape":"DisconnectDetails", "documentation":"

    Information about the call disconnect experience.

    " @@ -7893,6 +7953,7 @@ }, "documentation":"

    An object that can be used to specify Tag conditions or Hierarchy Group conditions inside the SearchFilter.

    This accepts an OR of AND (List of List) input where:

    • The top level list specifies conditions that need to be applied with OR operator

    • The inner list specifies conditions that need to be applied with AND operator.

    Only one field can be populated. Maximum number of allowed Tag conditions is 25. Maximum number of allowed Hierarchy Group conditions is 20.

    " }, + "Count":{"type":"integer"}, "CreateAgentStatusRequest":{ "type":"structure", "required":[ @@ -8150,15 +8211,15 @@ }, "References":{ "shape":"ContactReferences", - "documentation":"

    A formatted URL that is shown to an agent in the Contact Control Panel (CCP). Tasks can have the following reference types at the time of creation: URL | NUMBER | STRING | DATE | EMAIL | ATTACHMENT.

    " + "documentation":"

    A formatted URL that is shown to an agent in the Contact Control Panel (CCP). Tasks can have the following reference types at the time of creation: URL | NUMBER | STRING | DATE | EMAIL | ATTACHMENT.

    " }, "Channel":{ "shape":"Channel", - "documentation":"

    The channel for the contact

    CreateContact only supports the EMAIL and VOICE channels. The following information that states other channels are supported is incorrect. We are working to update this topic.

    " + "documentation":"

    The channel for the contact.

    The CHAT channel is not supported. The following information is incorrect. We're working to correct it.

    " }, "InitiationMethod":{ "shape":"ContactInitiationMethod", - "documentation":"

    Indicates how the contact was initiated.

    CreateContact only supports the following initiation methods:

    • For EMAIL: OUTBOUND, AGENT_REPLY, and FLOW.

    • For VOICE: TRANSFER and the subtype connect:ExternalAudio.

    The following information that states other initiation methods are supported is incorrect. We are working to update this topic.

    " + "documentation":"

    Indicates how the contact was initiated.

    CreateContact only supports the following initiation methods. Valid values by channel are:

    • For VOICE: TRANSFER and the subtype connect:ExternalAudio

    • For EMAIL: OUTBOUND | AGENT_REPLY | FLOW

    • For TASK: API

    The other channels listed below are incorrect. We're working to correct this information.

    " }, "ExpiryDurationInMinutes":{ "shape":"ExpiryDurationInMinutes", @@ -8166,11 +8227,11 @@ }, "UserInfo":{ "shape":"UserInfo", - "documentation":"

    User details for the contact

    " + "documentation":"

    User details for the contact

    UserInfo is required when creating an EMAIL contact with OUTBOUND and AGENT_REPLY contact initiation methods.

    " }, "InitiateAs":{ "shape":"InitiateAs", - "documentation":"

    Initial state of the contact when it's created

    " + "documentation":"

    Initial state of the contact when it's created. Only TASK channel contacts can be initiated with COMPLETED state.

    " }, "Name":{ "shape":"Name", @@ -8222,7 +8283,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

    The email address with the instance, in [^\\s@]+@[^\\s@]+\\.[^\\s@]+ format.

    " + "documentation":"

    The email address, including the domain.

    " }, "DisplayName":{ "shape":"EmailAddressDisplayName", @@ -10642,7 +10703,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

    The email address with the instance, in [^\\s@]+@[^\\s@]+\\.[^\\s@]+ format.

    " + "documentation":"

    The email address, including the domain.

    " }, "DisplayName":{ "shape":"EmailAddressDisplayName", @@ -11835,6 +11896,10 @@ "min":0 }, "DurationInSeconds":{"type":"integer"}, + "DurationMillis":{ + "type":"long", + "min":0 + }, "EffectiveHoursOfOperationList":{ "type":"list", "member":{"shape":"EffectiveHoursOfOperations"} @@ -11886,14 +11951,14 @@ "members":{ "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

    The email address with the instance, in [^\\s@]+@[^\\s@]+\\.[^\\s@]+ format.

    " + "documentation":"

    The email address, including the domain.

    " }, "DisplayName":{ "shape":"EmailAddressDisplayName", "documentation":"

    The display name of email address.

    " } }, - "documentation":"

    Contains information about a source or destination email address

    " + "documentation":"

    Contains information about a source or destination email address.

    " }, "EmailAddressList":{ "type":"list", @@ -11912,7 +11977,7 @@ }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

    The email address with the instance, in [^\\s@]+@[^\\s@]+\\.[^\\s@]+ format.

    " + "documentation":"

    The email address, including the domain.

    " }, "Description":{ "shape":"Description", @@ -11928,7 +11993,7 @@ "EmailAddressRecipientList":{ "type":"list", "member":{"shape":"EmailAddressInfo"}, - "max":10, + "max":50, "min":1 }, "EmailAddressSearchConditionList":{ @@ -13751,15 +13816,15 @@ }, "Filters":{ "shape":"FiltersV2List", - "documentation":"

    The filters to apply to returned metrics. You can filter on the following resources:

    • Agents

    • Campaigns

    • Channels

    • Feature

    • Queues

    • Routing profiles

    • Routing step expression

    • User hierarchy groups

    At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

    For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement.

    To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.

    Note the following limits:

    • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | BOT_ID | BOT_ALIAS | BOT_VERSION | BOT_LOCALE | BOT_INTENT_NAME | CAMPAIGN | CAMPAIGN_DELIVERY_EVENT_TYPE |CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | EVALUATION_FORM | EVALUATION_SECTION | EVALUATION_QUESTION | EVALUATION_SOURCE | FEATURE | FLOW_ACTION_ID | FLOW_TYPE | FLOWS_MODULE_RESOURCE_ID | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | FORM_VERSION | INITIATION_METHOD | INVOKING_RESOURCE_PUBLISHED_TIMESTAMP | INVOKING_RESOURCE_TYPE | PARENT_FLOWS_RESOURCE_ID | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED |

    • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.

      contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.

      connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.

      ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.

      Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key.

      • TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

      • FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow

      This filter is available only for contact record-driven metrics.

      Campaign ARNs are valid filterValues for the CAMPAIGN filter key.

    " + "documentation":"

    The filters to apply to returned metrics. You can filter on the following resources:

    • Agents

    • Campaigns

    • Channels

    • Feature

    • Queues

    • Routing profiles

    • Routing step expression

    • User hierarchy groups

    At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

    For metrics for outbound campaigns analytics, you can also use campaigns to satisfy at least one filter requirement.

    To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.

    Note the following limits:

    • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | BOT_ID | BOT_ALIAS | BOT_VERSION | BOT_LOCALE | BOT_INTENT_NAME | CAMPAIGN | CAMPAIGN_DELIVERY_EVENT_TYPE | CAMPAIGN_EXCLUDED_EVENT_TYPE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | EVALUATION_FORM | EVALUATION_SECTION | EVALUATION_QUESTION | EVALUATION_SOURCE | FEATURE | FLOW_ACTION_ID | FLOW_TYPE | FLOWS_MODULE_RESOURCE_ID | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | FORM_VERSION | INITIATION_METHOD | INVOKING_RESOURCE_PUBLISHED_TIMESTAMP | INVOKING_RESOURCE_TYPE | PARENT_FLOWS_RESOURCE_ID | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED |

    • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.

      contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.

      connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.

      ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.

      Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key.

      • TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.

      • FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow.

      This filter is available only for contact record-driven metrics.

      Campaign ARNs are valid filterValues for the CAMPAIGN filter key.

    " }, "Groupings":{ "shape":"GroupingsV2", - "documentation":"

    The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

    If no grouping is specified, a summary of all metrics is returned.

    Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | BOT_ID | BOT_ALIAS | BOT_VERSION | BOT_LOCALE | BOT_INTENT_NAME | CAMPAIGN | CAMPAIGN_DELIVERY_EVENT_TYPE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | EVALUATION_FORM | EVALUATION_SECTION | EVALUATION_QUESTION | EVALUATION_SOURCE | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_ACTION_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | FORM_VERSION | INITIATION_METHOD | INVOKING_RESOURCE_PUBLISHED_TIMESTAMP | INVOKING_RESOURCE_TYPE | PARENT_FLOWS_RESOURCE_ID | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION

    Type: Array of strings

    Array Members: Maximum number of 4 items

    Required: No

    " + "documentation":"

    The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

    If no grouping is specified, a summary of all metrics is returned.

    Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | BOT_ID | BOT_ALIAS | BOT_VERSION | BOT_LOCALE | BOT_INTENT_NAME | CAMPAIGN | CAMPAIGN_DELIVERY_EVENT_TYPE | CAMPAIGN_EXCLUDED_EVENT_TYPE | CAMPAIGN_EXECUTION_TIMESTAMP | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | EVALUATION_FORM | EVALUATION_SECTION | EVALUATION_QUESTION | EVALUATION_SOURCE | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_ACTION_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | FORM_VERSION | INITIATION_METHOD | INVOKING_RESOURCE_PUBLISHED_TIMESTAMP | INVOKING_RESOURCE_TYPE | PARENT_FLOWS_RESOURCE_ID | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION

    Type: Array of strings

    Array Members: Maximum number of 4 items

    Required: No

    " }, "Metrics":{ "shape":"MetricsV2", - "documentation":"

    The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Metrics definition in the Amazon Connect Administrator Guide.

    ABANDONMENT_RATE

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Abandonment rate

    AGENT_ADHERENT_TIME

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Adherent time

    AGENT_ANSWER_RATE

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent answer rate

    AGENT_NON_ADHERENT_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Non-adherent time

    AGENT_NON_RESPONSE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent non-response

    AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

    UI name: Agent non-response without customer abandons

    AGENT_OCCUPANCY

    Unit: Percentage

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Occupancy

    AGENT_SCHEDULE_ADHERENCE

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Adherence

    AGENT_SCHEDULED_TIME

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Scheduled time

    AVG_ABANDON_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average queue abandon time

    AVG_ACTIVE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Average active time

    AVG_AFTER_CONTACT_WORK_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average after contact work time

    Feature is a valid filter but not a valid grouping.

    AVG_AGENT_CONNECTING_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Average agent API connecting time

    The Negate key in metric-level filters is not applicable for this metric.

    AVG_AGENT_PAUSE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Average agent pause time

    AVG_BOT_CONVERSATION_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Average bot conversation time

    AVG_BOT_CONVERSATION_TURNS

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Average bot conversation turns

    AVG_CASE_RELATED_CONTACTS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Average contacts per case

    AVG_CASE_RESOLUTION_TIME

    Unit: Seconds

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Average case resolution time

    AVG_CONTACT_DURATION

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average contact duration

    Feature is a valid filter but not a valid grouping.

    AVG_CONVERSATION_DURATION

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average conversation duration

    AVG_DIALS_PER_MINUTE

    This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.

    Unit: Count

    Valid groupings and filters: Agent, Campaign, Queue, Routing Profile

    UI name: Average dials per minute

    AVG_EVALUATION_SCORE

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Section ID, Evaluation Question ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Average evaluation score

    AVG_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Average flow time

    AVG_GREETING_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent greeting time

    AVG_HANDLE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

    UI name: Average handle time

    Feature is a valid filter but not a valid grouping.

    AVG_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer hold time

    Feature is a valid filter but not a valid grouping.

    AVG_HOLD_TIME_ALL_CONTACTS

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer hold time all contacts

    AVG_HOLDS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average holds

    Feature is a valid filter but not a valid grouping.

    AVG_INTERACTION_AND_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interaction and customer hold time

    AVG_INTERACTION_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interaction time

    Feature is a valid filter but not a valid grouping.

    AVG_INTERRUPTIONS_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interruptions

    AVG_INTERRUPTION_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interruption time

    AVG_NON_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average non-talk time

    AVG_QUEUE_ANSWER_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average queue answer time

    Feature is a valid filter but not a valid grouping.

    AVG_RESOLUTION_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average resolution time

    AVG_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average talk time

    AVG_TALK_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent talk time

    AVG_TALK_TIME_CUSTOMER

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer talk time

    AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION

    This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.

    Unit: Seconds

    Valid groupings and filters: Campaign

    UI name: Average wait time after customer connection

    AVG_WEIGHTED_EVALUATION_SCORE

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Section ID, Evaluation Question ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Average weighted evaluation score

    BOT_CONVERSATIONS_COMPLETED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Bot conversations completed

    BOT_INTENTS_COMPLETED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Bot intents completed

    CAMPAIGN_CONTACTS_ABANDONED_AFTER_X

    This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.

    Unit: Count

    Valid groupings and filters: Agent, Campaign

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

    UI name: Campaign contacts abandoned after X

    CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE

    This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.

    Unit: Percent

    Valid groupings and filters: Agent, Campaign

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

    UI name: Campaign contacts abandoned after X rate

    CAMPAIGN_INTERACTIONS

    This metric is available only for outbound campaigns using the email delivery mode.

    Unit: Count

    Valid metric filter key: CAMPAIGN_INTERACTION_EVENT_TYPE

    Valid groupings and filters: Campaign

    UI name: Campaign interactions

    CAMPAIGN_SEND_ATTEMPTS

    This metric is available only for outbound campaigns.

    Unit: Count

    Valid groupings and filters: Campaign, Channel, contact/segmentAttributes/connect:Subtype

    UI name: Campaign send attempts

    CASES_CREATED

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases created

    CONTACTS_CREATED

    Unit: Count

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts created

    Feature is a valid filter but not a valid grouping.

    CONTACTS_HANDLED

    Unit: Count

    Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

    UI name: API contacts handled

    Feature is a valid filter but not a valid grouping.

    CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT

    Unit: Count

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts handled (connected to agent timestamp)

    CONTACTS_HOLD_ABANDONS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts hold disconnect

    CONTACTS_ON_HOLD_AGENT_DISCONNECT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts hold agent disconnect

    CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts hold customer disconnect

    CONTACTS_PUT_ON_HOLD

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts put on hold

    CONTACTS_TRANSFERRED_OUT_EXTERNAL

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts transferred out external

    CONTACTS_TRANSFERRED_OUT_INTERNAL

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts transferred out internal

    CONTACTS_QUEUED

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts queued

    CONTACTS_QUEUED_BY_ENQUEUE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

    UI name: Contacts queued (enqueue timestamp)

    CONTACTS_REMOVED_FROM_QUEUE_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts removed from queue in X seconds

    CONTACTS_RESOLVED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts resolved in X

    CONTACTS_TRANSFERRED_OUT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out

    Feature is a valid filter but not a valid grouping.

    CONTACTS_TRANSFERRED_OUT_BY_AGENT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out by agent

    CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out queue

    CURRENT_CASES

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Current cases

    DELIVERY_ATTEMPTS

    This metric is available only for outbound campaigns.

    Unit: Count

    Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, CAMPAIGN_DELIVERY_EVENT_TYPE, DISCONNECT_REASON

    Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Campaign Delivery EventType, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile

    UI name: Delivery attempts

    Campaign Delivery Event Type filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.

    DELIVERY_ATTEMPT_DISPOSITION_RATE

    This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.

    Unit: Percent

    Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, CAMPAIGN_DELIVERY_EVENT_TYPE, DISCONNECT_REASON

    Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile

    UI name: Delivery attempt disposition rate

    Campaign Delivery Event Type filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.

    EVALUATIONS_PERFORMED

    Unit: Count

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Evaluations performed

    FLOWS_OUTCOME

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows outcome

    FLOWS_STARTED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows started

    HUMAN_ANSWERED_CALLS

    This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.

    Unit: Count

    Valid groupings and filters: Agent, Campaign

    UI name: Human answered

    MAX_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Maximum flow time

    MAX_QUEUED_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Maximum queued time

    MIN_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Minimum flow time

    PERCENT_AUTOMATIC_FAILS

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Automatic fails percent

    PERCENT_BOT_CONVERSATIONS_OUTCOME

    Unit: Percent

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Percent bot conversations outcome

    PERCENT_BOT_INTENTS_OUTCOME

    Unit: Percent

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Percent bot intents outcome

    PERCENT_CASES_FIRST_CONTACT_RESOLVED

    Unit: Percent

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases resolved on first contact

    PERCENT_CONTACTS_STEP_EXPIRED

    Unit: Percent

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    PERCENT_CONTACTS_STEP_JOINED

    Unit: Percent

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    PERCENT_FLOWS_OUTCOME

    Unit: Percent

    Valid metric filter key: FLOWS_OUTCOME_TYPE

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows outcome percentage.

    The FLOWS_OUTCOME_TYPE is not a valid grouping.

    PERCENT_NON_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Non-talk time percent

    PERCENT_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Talk time percent

    PERCENT_TALK_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Agent talk time percent

    PERCENT_TALK_TIME_CUSTOMER

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Customer talk time percent

    REOPENED_CASE_ACTIONS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases reopened

    RESOLVED_CASE_ACTIONS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases resolved

    SERVICE_LEVEL

    You can include up to 20 SERVICE_LEVEL metrics in a request.

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Service level X

    STEP_CONTACTS_QUEUED

    Unit: Count

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    SUM_AFTER_CONTACT_WORK_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: After contact work time

    SUM_CONNECTING_TIME_AGENT

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent API connecting time

    The Negate key in metric-level filters is not applicable for this metric.

    CONTACTS_ABANDONED

    Unit: Count

    Metric filter:

    • Valid values: API | Incoming | Outbound | Transfer | Callback | Queue_Transfer | Disconnect

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

    UI name: Contact abandoned

    SUM_CONTACTS_ABANDONED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts abandoned in X seconds

    SUM_CONTACTS_ANSWERED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts answered in X seconds

    SUM_CONTACT_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contact flow time

    SUM_CONTACT_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent on contact time

    SUM_CONTACTS_DISCONNECTED

    Valid metric filter key: DISCONNECT_REASON

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contact disconnected

    SUM_ERROR_STATUS_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Error status time

    SUM_HANDLE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contact handle time

    SUM_HOLD_TIME

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Customer hold time

    SUM_IDLE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent idle time

    SUM_INTERACTION_AND_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Agent interaction and hold time

    SUM_INTERACTION_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent interaction time

    SUM_NON_PRODUCTIVE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent non-productive time

    SUM_ONLINE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Online time

    SUM_RETRY_CALLBACK_ATTEMPTS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Callback attempts

    " + "documentation":"

    The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Metrics definition in the Amazon Connect Administrator Guide.

    ABANDONMENT_RATE

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Abandonment rate

    AGENT_ADHERENT_TIME

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Adherent time

    AGENT_ANSWER_RATE

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent answer rate

    AGENT_NON_ADHERENT_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Non-adherent time

    AGENT_NON_RESPONSE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent non-response

    AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

    UI name: Agent non-response without customer abandons

    AGENT_OCCUPANCY

    Unit: Percentage

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Occupancy

    AGENT_SCHEDULE_ADHERENCE

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Adherence

    AGENT_SCHEDULED_TIME

    This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Scheduled time

    AVG_ABANDON_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average queue abandon time

    AVG_ACTIVE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Average active time

    AVG_AFTER_CONTACT_WORK_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average after contact work time

    Feature is a valid filter but not a valid grouping.

    AVG_AGENT_CONNECTING_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Average agent API connecting time

    The Negate key in metric-level filters is not applicable for this metric.

    AVG_AGENT_PAUSE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Average agent pause time

    AVG_BOT_CONVERSATION_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Average bot conversation time

    AVG_BOT_CONVERSATION_TURNS

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Average bot conversation turns

    AVG_CASE_RELATED_CONTACTS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Average contacts per case

    AVG_CASE_RESOLUTION_TIME

    Unit: Seconds

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Average case resolution time

    AVG_CONTACT_DURATION

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average contact duration

    Feature is a valid filter but not a valid grouping.

    AVG_CONVERSATION_DURATION

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average conversation duration

    AVG_DIALS_PER_MINUTE

    This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.

    Unit: Count

    Valid groupings and filters: Agent, Campaign, Queue, Routing Profile

    UI name: Average dials per minute

    AVG_EVALUATION_SCORE

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Section ID, Evaluation Question ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Average evaluation score

    AVG_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Average flow time

    AVG_GREETING_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent greeting time

    AVG_HANDLE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

    UI name: Average handle time

    Feature is a valid filter but not a valid grouping.

    AVG_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer hold time

    Feature is a valid filter but not a valid grouping.

    AVG_HOLD_TIME_ALL_CONTACTS

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer hold time all contacts

    AVG_HOLDS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average holds

    Feature is a valid filter but not a valid grouping.

    AVG_INTERACTION_AND_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interaction and customer hold time

    AVG_INTERACTION_TIME

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interaction time

    Feature is a valid filter but not a valid grouping.

    AVG_INTERRUPTIONS_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interruptions

    AVG_INTERRUPTION_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent interruption time

    AVG_NON_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average non-talk time

    AVG_QUEUE_ANSWER_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average queue answer time

    Feature is a valid filter but not a valid grouping.

    AVG_RESOLUTION_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average resolution time

    AVG_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average talk time

    AVG_TALK_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average agent talk time

    AVG_TALK_TIME_CUSTOMER

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Average customer talk time

    AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION

    This metric is available only for outbound campaigns that use the agent assisted voice and automated voice delivery modes.

    Unit: Seconds

    Valid groupings and filters: Campaign

    UI name: Average wait time after customer connection

    AVG_WEIGHTED_EVALUATION_SCORE

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form Id, Evaluation Section ID, Evaluation Question ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Average weighted evaluation score

    BOT_CONVERSATIONS_COMPLETED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Bot conversations completed

    BOT_INTENTS_COMPLETED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Bot intents completed

    CAMPAIGN_CONTACTS_ABANDONED_AFTER_X

    This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.

    Unit: Count

    Valid groupings and filters: Agent, Campaign

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

    UI name: Campaign contacts abandoned after X

    CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE

    This metric is available only for outbound campaigns using the agent assisted voice and automated voice delivery modes.

    Unit: Percent

    Valid groupings and filters: Agent, Campaign

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than).

    UI name: Campaign contacts abandoned after X rate

    CAMPAIGN_INTERACTIONS

    This metric is available only for outbound campaigns using the email delivery mode.

    Unit: Count

    Valid metric filter key: CAMPAIGN_INTERACTION_EVENT_TYPE

    Valid groupings and filters: Campaign

    UI name: Campaign interactions

    CAMPAIGN_PROGRESS_RATE

    This metric is only available for outbound campaigns initiated using a customer segment. It is not available for event triggered campaigns.

    Unit: Percent

    Valid groupings and filters: Campaign, Campaign Execution Timestamp

    UI name: Campaign progress rate

    CAMPAIGN_SEND_ATTEMPTS

    This metric is available only for outbound campaigns.

    Unit: Count

    Valid groupings and filters: Campaign, Channel, contact/segmentAttributes/connect:Subtype

    UI name: Campaign send attempts

    CAMPAIGN_SEND_EXCLUSIONS

    This metric is available only for outbound campaigns.

    Valid metric filter key: CAMPAIGN_EXCLUDED_EVENT_TYPE

    Unit: Count

    Valid groupings and filters: Campaign, Campaign Excluded Event Type, Campaign Execution Timestamp

    UI name: Campaign send exclusions

    CASES_CREATED

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases created

    CONTACTS_CREATED

    Unit: Count

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts created

    Feature is a valid filter but not a valid grouping.

    CONTACTS_HANDLED

    Unit: Count

    Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

    UI name: API contacts handled

    Feature is a valid filter but not a valid grouping.

    CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT

    Unit: Count

    Valid metric filter key: INITIATION_METHOD

    Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts handled (connected to agent timestamp)

    CONTACTS_HOLD_ABANDONS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts hold disconnect

    CONTACTS_ON_HOLD_AGENT_DISCONNECT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts hold agent disconnect

    CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts hold customer disconnect

    CONTACTS_PUT_ON_HOLD

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts put on hold

    CONTACTS_TRANSFERRED_OUT_EXTERNAL

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts transferred out external

    CONTACTS_TRANSFERRED_OUT_INTERNAL

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contacts transferred out internal

    CONTACTS_QUEUED

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts queued

    CONTACTS_QUEUED_BY_ENQUEUE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

    UI name: Contacts queued (enqueue timestamp)

    CONTACTS_REMOVED_FROM_QUEUE_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts removed from queue in X seconds

    CONTACTS_RESOLVED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts resolved in X

    CONTACTS_TRANSFERRED_OUT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out

    Feature is a valid filter but not a valid grouping.

    CONTACTS_TRANSFERRED_OUT_BY_AGENT

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out by agent

    CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contacts transferred out queue

    CURRENT_CASES

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Current cases

    DELIVERY_ATTEMPTS

    This metric is available only for outbound campaigns.

    Unit: Count

    Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, CAMPAIGN_DELIVERY_EVENT_TYPE, DISCONNECT_REASON

    Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Campaign Delivery EventType, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile

    UI name: Delivery attempts

    Campaign Delivery EventType filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.

    DELIVERY_ATTEMPT_DISPOSITION_RATE

    This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.

    Unit: Percent

    Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, CAMPAIGN_DELIVERY_EVENT_TYPE, DISCONNECT_REASON

    Valid groupings and filters: Agent, Answering Machine Detection Status, Campaign, Channel, contact/segmentAttributes/connect:Subtype, Disconnect Reason, Queue, Routing Profile

    UI name: Delivery attempt disposition rate

    Campaign Delivery Event Type filter and grouping are only available for SMS and Email campaign delivery modes. Agent, Queue, Routing Profile, Answering Machine Detection Status and Disconnect Reason are only available for agent assisted voice and automated voice delivery modes.

    EVALUATIONS_PERFORMED

    Unit: Count

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Evaluations performed

    FLOWS_OUTCOME

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows outcome

    FLOWS_STARTED

    Unit: Count

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows started

    HUMAN_ANSWERED_CALLS

    This metric is available only for outbound campaigns. Dispositions for the agent assisted voice and automated voice delivery modes are only available with answering machine detection enabled.

    Unit: Count

    Valid groupings and filters: Agent, Campaign

    UI name: Human answered

    MAX_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Maximum flow time

    MAX_QUEUED_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Maximum queued time

    MIN_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Minimum flow time

    PERCENT_AUTOMATIC_FAILS

    Unit: Percent

    Valid groupings and filters: Agent, Agent Hierarchy, Channel, Evaluation Form ID, Evaluation Source, Form Version, Queue, Routing Profile

    UI name: Automatic fails percent

    PERCENT_BOT_CONVERSATIONS_OUTCOME

    Unit: Percent

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Percent bot conversations outcome

    PERCENT_BOT_INTENTS_OUTCOME

    Unit: Percent

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Bot ID, Bot alias, Bot version, Bot locale, Bot intent name, Flows resource ID, Flows module resource ID, Flow type, Flow action ID, Invoking resource published timestamp, Initiation method, Invoking resource type, Parent flows resource ID

    UI name: Percent bot intents outcome

    PERCENT_CASES_FIRST_CONTACT_RESOLVED

    Unit: Percent

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases resolved on first contact

    PERCENT_CONTACTS_STEP_EXPIRED

    Unit: Percent

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    PERCENT_CONTACTS_STEP_JOINED

    Unit: Percent

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    PERCENT_FLOWS_OUTCOME

    Unit: Percent

    Valid metric filter key: FLOWS_OUTCOME_TYPE

    Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp

    UI name: Flows outcome percentage.

    The FLOWS_OUTCOME_TYPE is not a valid grouping.

    PERCENT_NON_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Non-talk time percent

    PERCENT_TALK_TIME

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Talk time percent

    PERCENT_TALK_TIME_AGENT

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Agent talk time percent

    PERCENT_TALK_TIME_CUSTOMER

    This metric is available only for contacts analyzed by Contact Lens conversational analytics.

    Unit: Percentage

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Customer talk time percent

    RECIPIENTS_ATTEMPTED

    This metric is only available for outbound campaigns initiated using a customer segment. It is not available for event triggered campaigns.

    Unit: Count

    Valid groupings and filters: Campaign, Campaign Execution Timestamp

    UI name: Recipients attempted

    RECIPIENTS_INTERACTED

    This metric is only available for outbound campaigns initiated using a customer segment. It is not available for event triggered campaigns.

    Valid metric filter key: CAMPAIGN_INTERACTION_EVENT_TYPE

    Unit: Count

    Valid groupings and filters: Campaign, Channel, contact/segmentAttributes/connect:Subtype, Campaign Execution Timestamp

    UI name: Recipients interacted

    RECIPIENTS_TARGETED

    This metric is only available for outbound campaigns initiated using a customer segment. It is not available for event triggered campaigns.

    Unit: Count

    Valid groupings and filters: Campaign, Campaign Execution Timestamp

    UI name: Recipients targeted

    REOPENED_CASE_ACTIONS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases reopened

    RESOLVED_CASE_ACTIONS

    Unit: Count

    Required filter key: CASE_TEMPLATE_ARN

    Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

    UI name: Cases resolved

    SERVICE_LEVEL

    You can include up to 20 SERVICE_LEVEL metrics in a request.

    Unit: Percent

    Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Service level X

    STEP_CONTACTS_QUEUED

    Unit: Count

    Valid groupings and filters: Queue, RoutingStepExpression

    UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI.

    SUM_AFTER_CONTACT_WORK_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: After contact work time

    SUM_CONNECTING_TIME_AGENT

    Unit: Seconds

    Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent API connecting time

    The Negate key in metric-level filters is not applicable for this metric.

    CONTACTS_ABANDONED

    Unit: Count

    Metric filter:

    • Valid values: API | Incoming | Outbound | Transfer | Callback | Queue_Transfer | Disconnect

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

    UI name: Contact abandoned

    SUM_CONTACTS_ABANDONED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts abandoned in X seconds

    SUM_CONTACTS_ANSWERED_IN_X

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for \"Less than\") or LTE (for \"Less than equal\").

    UI name: Contacts answered in X seconds

    SUM_CONTACT_FLOW_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contact flow time

    SUM_CONTACT_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent on contact time

    SUM_CONTACTS_DISCONNECTED

    Valid metric filter key: DISCONNECT_REASON

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Contact disconnected

    SUM_ERROR_STATUS_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Error status time

    SUM_HANDLE_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Contact handle time

    SUM_HOLD_TIME

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Customer hold time

    SUM_IDLE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent idle time

    SUM_INTERACTION_AND_HOLD_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

    UI name: Agent interaction and hold time

    SUM_INTERACTION_TIME

    Unit: Seconds

    Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

    UI name: Agent interaction time

    SUM_NON_PRODUCTIVE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Agent non-productive time

    SUM_ONLINE_TIME_AGENT

    Unit: Seconds

    Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

    UI name: Online time

    SUM_RETRY_CALLBACK_ATTEMPTS

    Unit: Count

    Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect

    UI name: Callback attempts

    " }, "NextToken":{ "shape":"NextToken2500", @@ -14701,14 +14766,14 @@ "members":{ "ToAddresses":{ "shape":"EmailAddressRecipientList", - "documentation":"

    The additional recipients information present in to list.

    " + "documentation":"

    The additional recipients information present in to list. You must have 1 required recipient (DestinationEmailAddress). You can then specify up to 49 additional recipients (across ToAddresses and CcAddresses), for a total of 50 recipients.

    " }, "CcAddresses":{ "shape":"EmailAddressRecipientList", - "documentation":"

    The additional recipients information present in cc list.

    " + "documentation":"

    The additional recipients information present in cc list. You must have 1 required recipient (DestinationEmailAddress). You can then specify up to 49 additional recipients (across ToAddresses and CcAddresses), for a total of 50 recipients.

    " } }, - "documentation":"

    The additional TO CC recipients information of inbound email.

    " + "documentation":"

    Information about the additional TO and CC recipients of an inbound email contact.

    You can include up to 50 email addresses in total, distributed across DestinationEmailAddress, ToAddresses, and CcAddresses. This total must include one required DestinationEmailAddress. You can then specify up to 49 addresses allocated across ToAddresses and CcAddresses as needed.

    " }, "InboundCallsEnabled":{"type":"boolean"}, "InboundEmailContent":{ @@ -14767,7 +14832,10 @@ "Index":{"type":"integer"}, "InitiateAs":{ "type":"string", - "enum":["CONNECTED_TO_USER"] + "enum":[ + "CONNECTED_TO_USER", + "COMPLETED" + ] }, "InitiationMethodList":{ "type":"list", @@ -17945,6 +18013,7 @@ }, "documentation":"

    The type of notification recipient.

    " }, + "NullableBoolean":{"type":"boolean"}, "NullableProficiencyLevel":{ "type":"float", "max":5.0, @@ -18059,10 +18128,10 @@ "members":{ "CcEmailAddresses":{ "shape":"EmailAddressRecipientList", - "documentation":"

    The additional CC email address recipients information.

    " + "documentation":"

    Information about the additional CC email address recipients. Email recipients are limited to 50 total addresses: 1 required recipient in the DestinationEmailAddress field and up to 49 recipients in the 'CcEmailAddresses' field.

    " } }, - "documentation":"

    The additional recipients information of outbound email.

    " + "documentation":"

    Information about the additional recipients of outbound email.

    " }, "OutboundCallerConfig":{ "type":"structure", @@ -18108,7 +18177,7 @@ "documentation":"

    The identifier of the email address.

    " } }, - "documentation":"

    The outbound email address Id.

    " + "documentation":"

    The outbound email address ID.

    " }, "OutboundEmailContent":{ "type":"structure", @@ -18260,6 +18329,48 @@ "max":256, "min":1 }, + "ParticipantMetrics":{ + "type":"structure", + "members":{ + "ParticipantId":{ + "shape":"ParticipantId", + "documentation":"

    The Participant's ID.

    " + }, + "ParticipantType":{ + "shape":"ParticipantType", + "documentation":"

    Information about the conversation participant. Following are the participant types: [Agent, Customer, Supervisor].

    " + }, + "ConversationAbandon":{ + "shape":"NullableBoolean", + "documentation":"

    A boolean flag indicating whether the chat conversation was abandoned by a Participant.

    " + }, + "MessagesSent":{ + "shape":"Count", + "documentation":"

    Number of chat messages sent by Participant.

    " + }, + "NumResponses":{ + "shape":"Count", + "documentation":"

    Number of chat responses sent by Participant.

    " + }, + "MessageLengthInChars":{ + "shape":"Count", + "documentation":"

    Number of chat characters sent by Participant.

    " + }, + "TotalResponseTimeInMillis":{ + "shape":"DurationMillis", + "documentation":"

    Total chat response time by Participant.

    " + }, + "MaxResponseTimeInMillis":{ + "shape":"DurationMillis", + "documentation":"

    Maximum chat response time by Participant.

    " + }, + "LastMessageTimestamp":{ + "shape":"timestamp", + "documentation":"

    Timestamp of last chat message by Participant.

    " + } + }, + "documentation":"

    Information about a participant's interactions in a contact.

    " + }, "ParticipantRole":{ "type":"string", "enum":[ @@ -22718,7 +22829,7 @@ }, "InitialMessage":{ "shape":"ChatMessage", - "documentation":"

    The initial message to be sent to the newly created chat. If you have a Lex bot in your flow, the initial message is not delivered to the Lex bot.

    " + "documentation":"

    The initial message to be sent to the newly created chat.

    " }, "ClientToken":{ "shape":"ClientToken", @@ -22906,7 +23017,7 @@ }, "DestinationEmailAddress":{ "shape":"EmailAddress", - "documentation":"

    The email address associated with the instance.

    " + "documentation":"

    The email address associated with the Amazon Connect instance.

    " }, "Description":{ "shape":"Description", @@ -23041,7 +23152,7 @@ }, "FromEmailAddress":{ "shape":"EmailAddressInfo", - "documentation":"

    The email address associated with the instance.

    " + "documentation":"

    The email address associated with the Amazon Connect instance.

    " }, "DestinationEmailAddress":{ "shape":"EmailAddressInfo", @@ -23049,7 +23160,7 @@ }, "AdditionalRecipients":{ "shape":"OutboundAdditionalRecipients", - "documentation":"

    The addtional recipients address of email in CC.

    " + "documentation":"

    The additional recipients address of email in CC.

    " }, "EmailMessage":{ "shape":"OutboundEmailContent", diff --git a/services/connectcampaigns/pom.xml b/services/connectcampaigns/pom.xml index ee58d3fc40bc..72b4c27fcf53 100644 --- a/services/connectcampaigns/pom.xml +++ b/services/connectcampaigns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connectcampaigns AWS Java SDK :: Services :: Connect Campaigns diff --git a/services/connectcampaigns/src/main/resources/codegen-resources/customization.config b/services/connectcampaigns/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/connectcampaigns/src/main/resources/codegen-resources/customization.config +++ b/services/connectcampaigns/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/connectcampaignsv2/pom.xml b/services/connectcampaignsv2/pom.xml index 9b4077c064b2..c30a2822734f 100644 --- a/services/connectcampaignsv2/pom.xml +++ b/services/connectcampaignsv2/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connectcampaignsv2 AWS Java SDK :: Services :: Connect Campaigns V2 diff --git a/services/connectcampaignsv2/src/main/resources/codegen-resources/customization.config b/services/connectcampaignsv2/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/connectcampaignsv2/src/main/resources/codegen-resources/customization.config +++ b/services/connectcampaignsv2/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json b/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json index d1a02e3c219a..4ef9cd3f1bb8 
100644 --- a/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/connectcampaignsv2/src/main/resources/codegen-resources/service-2.json @@ -231,6 +231,23 @@ ], "documentation":"

    Get the specific Connect instance config.

    " }, + "GetInstanceCommunicationLimits":{ + "name":"GetInstanceCommunicationLimits", + "http":{ + "method":"GET", + "requestUri":"/v2/connect-instance/{connectInstanceId}/communication-limits", + "responseCode":200 + }, + "input":{"shape":"GetInstanceCommunicationLimitsRequest"}, + "output":{"shape":"GetInstanceCommunicationLimitsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Get the instance communication limits.

    " + }, "GetInstanceOnboardingJobStatus":{ "name":"GetInstanceOnboardingJobStatus", "http":{ @@ -338,6 +355,24 @@ "documentation":"

    Put or update the integration for the specified Amazon Connect instance.

    ", "idempotent":true }, + "PutInstanceCommunicationLimits":{ + "name":"PutInstanceCommunicationLimits", + "http":{ + "method":"PUT", + "requestUri":"/v2/connect-instance/{connectInstanceId}/communication-limits", + "responseCode":200 + }, + "input":{"shape":"PutInstanceCommunicationLimitsRequest"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

    Put the instance communication limits. This API is idempotent.

    ", + "idempotent":true + }, "PutOutboundRequestBatch":{ "name":"PutOutboundRequestBatch", "http":{ @@ -900,7 +935,8 @@ "CommunicationLimitsConfig":{ "type":"structure", "members":{ - "allChannelSubtypes":{"shape":"CommunicationLimits"} + "allChannelSubtypes":{"shape":"CommunicationLimits"}, + "instanceLimitsHandling":{"shape":"InstanceLimitsHandling"} }, "documentation":"

    Communication limits config

    " }, @@ -1415,6 +1451,25 @@ }, "documentation":"

    The response for GetConnectInstanceConfig API.

    " }, + "GetInstanceCommunicationLimitsRequest":{ + "type":"structure", + "required":["connectInstanceId"], + "members":{ + "connectInstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"connectInstanceId" + } + }, + "documentation":"

    The request for GetInstanceCommunicationLimits API.

    " + }, + "GetInstanceCommunicationLimitsResponse":{ + "type":"structure", + "members":{ + "communicationLimitsConfig":{"shape":"InstanceCommunicationLimitsConfig"} + }, + "documentation":"

    The response for GetInstanceCommunicationLimits API.

    " + }, "GetInstanceOnboardingJobStatusRequest":{ "type":"structure", "required":["connectInstanceId"], @@ -1434,6 +1489,13 @@ }, "documentation":"

    The response for GetInstanceOnboardingJobStatus API.

    " }, + "InstanceCommunicationLimitsConfig":{ + "type":"structure", + "members":{ + "allChannelSubtypes":{"shape":"CommunicationLimits"} + }, + "documentation":"

    Instance Communication limits config

    " + }, "InstanceConfig":{ "type":"structure", "required":[ @@ -1472,6 +1534,14 @@ "documentation":"

    Operators for Connect instance identifier filter

    ", "enum":["Eq"] }, + "InstanceLimitsHandling":{ + "type":"string", + "documentation":"

    Instance limits handling

    ", + "enum":[ + "OPT_IN", + "OPT_OUT" + ] + }, "InstanceOnboardingJobFailureCode":{ "type":"string", "documentation":"

    Enumeration of the possible failure codes for instance onboarding job

    ", @@ -1840,6 +1910,22 @@ }, "documentation":"

    The request for PutConnectInstanceIntegration API.

    " }, + "PutInstanceCommunicationLimitsRequest":{ + "type":"structure", + "required":[ + "connectInstanceId", + "communicationLimitsConfig" + ], + "members":{ + "connectInstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"connectInstanceId" + }, + "communicationLimitsConfig":{"shape":"InstanceCommunicationLimitsConfig"} + }, + "documentation":"

    The request for PutInstanceCommunicationLimits API.

    " + }, "PutOutboundRequestBatchRequest":{ "type":"structure", "required":[ diff --git a/services/connectcases/pom.xml b/services/connectcases/pom.xml index f702d888a7d5..3b9a55170f9c 100644 --- a/services/connectcases/pom.xml +++ b/services/connectcases/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connectcases AWS Java SDK :: Services :: Connect Cases diff --git a/services/connectcases/src/main/resources/codegen-resources/customization.config b/services/connectcases/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/connectcases/src/main/resources/codegen-resources/customization.config +++ b/services/connectcases/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/connectcontactlens/pom.xml b/services/connectcontactlens/pom.xml index 31d0f9474d53..b8fc04d596bb 100644 --- a/services/connectcontactlens/pom.xml +++ b/services/connectcontactlens/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connectcontactlens AWS Java SDK :: Services :: Connect Contact Lens diff --git a/services/connectcontactlens/src/main/resources/codegen-resources/customization.config b/services/connectcontactlens/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/connectcontactlens/src/main/resources/codegen-resources/customization.config +++ b/services/connectcontactlens/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/connectparticipant/pom.xml b/services/connectparticipant/pom.xml index 483eb88c9060..6e01cbea3144 100644 --- 
a/services/connectparticipant/pom.xml +++ b/services/connectparticipant/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT connectparticipant AWS Java SDK :: Services :: ConnectParticipant diff --git a/services/connectparticipant/src/main/resources/codegen-resources/customization.config b/services/connectparticipant/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/connectparticipant/src/main/resources/codegen-resources/customization.config +++ b/services/connectparticipant/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/controlcatalog/pom.xml b/services/controlcatalog/pom.xml index 63cbce681410..38a8a4eef83c 100644 --- a/services/controlcatalog/pom.xml +++ b/services/controlcatalog/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT controlcatalog AWS Java SDK :: Services :: Control Catalog diff --git a/services/controlcatalog/src/main/resources/codegen-resources/customization.config b/services/controlcatalog/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/controlcatalog/src/main/resources/codegen-resources/customization.config +++ b/services/controlcatalog/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json b/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json index 2ff838410cfc..16cfd2c30469 100644 --- a/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json +++ b/services/controlcatalog/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": 
"MaxResults", "result_key": "CommonControls" }, + "ListControlMappings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ControlMappings" + }, "ListControls": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/controlcatalog/src/main/resources/codegen-resources/service-2.json b/services/controlcatalog/src/main/resources/codegen-resources/service-2.json index 7a2f6a6de542..61f793fe6c99 100644 --- a/services/controlcatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/controlcatalog/src/main/resources/codegen-resources/service-2.json @@ -48,6 +48,23 @@ ], "documentation":"

    Returns a paginated list of common controls from the Amazon Web Services Control Catalog.

    You can apply an optional filter to see common controls that have a specific objective. If you don’t provide a filter, the operation returns all common controls.

    " }, + "ListControlMappings":{ + "name":"ListControlMappings", + "http":{ + "method":"POST", + "requestUri":"/list-control-mappings", + "responseCode":200 + }, + "input":{"shape":"ListControlMappingsRequest"}, + "output":{"shape":"ListControlMappingsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

    Returns a paginated list of control mappings from the Control Catalog. Control mappings show relationships between controls and other entities, such as common controls or compliance frameworks.

    " + }, "ListControls":{ "name":"ListControls", "http":{ @@ -63,7 +80,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of all available controls in the Amazon Web Services Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

    " + "documentation":"

    Returns a paginated list of all available controls in the Control Catalog library. Allows you to discover available controls. The list of controls is given as structures of type controlSummary. The ARN is returned in the global controlcatalog format, as shown in the examples.

    " }, "ListDomains":{ "name":"ListDomains", @@ -80,7 +97,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of domains from the Amazon Web Services Control Catalog.

    " + "documentation":"

    Returns a paginated list of domains from the Control Catalog.

    " }, "ListObjectives":{ "name":"ListObjectives", @@ -97,7 +114,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Returns a paginated list of objectives from the Amazon Web Services Control Catalog.

    You can apply an optional filter to see the objectives that belong to a specific domain. If you don’t provide a filter, the operation returns all objectives.

    " + "documentation":"

    Returns a paginated list of objectives from the Control Catalog.

    You can apply an optional filter to see the objectives that belong to a specific domain. If you don’t provide a filter, the operation returns all objectives.

    " } }, "shapes":{ @@ -147,16 +164,33 @@ "min":41, "pattern":"arn:(aws(?:[-a-z]*)?):controlcatalog:::common-control/[0-9a-z]+" }, + "CommonControlArnFilterList":{ + "type":"list", + "member":{"shape":"CommonControlArn"}, + "max":1, + "min":1 + }, "CommonControlFilter":{ "type":"structure", "members":{ "Objectives":{ "shape":"ObjectiveResourceFilterList", - "documentation":"

    The objective that's used as filter criteria.

    You can use this parameter to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

    " + "documentation":"

    The objective that's used as filter criteria.

    You can use this parameter to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t supported.

    " } }, "documentation":"

    An optional filter that narrows the results to a specific objective.

    " }, + "CommonControlMappingDetails":{ + "type":"structure", + "required":["CommonControlArn"], + "members":{ + "CommonControlArn":{ + "shape":"CommonControlArn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the common control in the mapping.

    " + } + }, + "documentation":"

    A structure that contains details about a common control mapping. In particular, it returns the Amazon Resource Name (ARN) of the common control.

    " + }, "CommonControlSummary":{ "type":"structure", "required":[ @@ -204,12 +238,26 @@ "type":"list", "member":{"shape":"CommonControlSummary"} }, + "ControlAlias":{ + "type":"string", + "pattern":"[a-zA-Z0-9](?:[a-zA-Z0-9_.-]{0,254}[a-zA-Z0-9])" + }, + "ControlAliases":{ + "type":"list", + "member":{"shape":"ControlAlias"} + }, "ControlArn":{ "type":"string", "max":2048, "min":34, "pattern":"arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_\\-]+" }, + "ControlArnFilterList":{ + "type":"list", + "member":{"shape":"ControlArn"}, + "max":1, + "min":1 + }, "ControlBehavior":{ "type":"string", "enum":[ @@ -218,6 +266,61 @@ "DETECTIVE" ] }, + "ControlFilter":{ + "type":"structure", + "members":{ + "Implementations":{ + "shape":"ImplementationFilter", + "documentation":"

    A filter that narrows the results to controls with specific implementation types or identifiers. This field allows you to find controls that are implemented by specific Amazon Web Services services or with specific service identifiers.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for the ListControls operation. You can use this filter to narrow down the list of controls based on their implementation details.

    " + }, + "ControlMapping":{ + "type":"structure", + "required":[ + "ControlArn", + "MappingType", + "Mapping" + ], + "members":{ + "ControlArn":{ + "shape":"ControlArn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the control in the mapping.

    " + }, + "MappingType":{ + "shape":"MappingType", + "documentation":"

    The type of mapping relationship between the control and other entities. Indicates whether the mapping is to a framework or common control.

    " + }, + "Mapping":{ + "shape":"Mapping", + "documentation":"

    The details of the mapping relationship, containing either framework or common control information.

    " + } + }, + "documentation":"

    A structure that contains information about a control mapping, including the control ARN, mapping type, and mapping details.

    " + }, + "ControlMappingFilter":{ + "type":"structure", + "members":{ + "ControlArns":{ + "shape":"ControlArnFilterList", + "documentation":"

    A list of control ARNs to filter the mappings. When specified, only mappings associated with these controls are returned.

    " + }, + "CommonControlArns":{ + "shape":"CommonControlArnFilterList", + "documentation":"

    A list of common control ARNs to filter the mappings. When specified, only mappings associated with these common controls are returned.

    " + }, + "MappingTypes":{ + "shape":"MappingTypeFilterList", + "documentation":"

    A list of mapping types to filter the mappings. When specified, only mappings of these types are returned.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for the ListControlMappings operation. You can use this filter to narrow down the list of control mappings based on control ARNs, common control ARNs, or mapping types.

    " + }, + "ControlMappings":{ + "type":"list", + "member":{"shape":"ControlMapping"} + }, "ControlParameter":{ "type":"structure", "required":["Name"], @@ -261,6 +364,10 @@ "shape":"ControlArn", "documentation":"

    The Amazon Resource Name (ARN) of the control.

    " }, + "Aliases":{ + "shape":"ControlAliases", + "documentation":"

    A list of alternative identifiers for the control. These are human-readable designators, such as SH.S3.1. Several aliases can refer to the same control across different Amazon Web Services services or compliance frameworks.

    " + }, "Name":{ "shape":"String", "documentation":"

    The display name of the control.

    " @@ -284,6 +391,10 @@ "CreateTime":{ "shape":"Timestamp", "documentation":"

    A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.

    " + }, + "GovernedResources":{ + "shape":"GovernedResources", + "documentation":"

    A list of Amazon Web Services resource types that are governed by this control. This information helps you understand which controls can govern certain types of resources, and conversely, which resources are affected when the control is implemented. The resources are represented as Amazon Web Services CloudFormation resource types. If GovernedResources cannot be represented by available CloudFormation resource types, it’s returned as an empty list.

    " } }, "documentation":"

    Overview of information about a control.

    " @@ -353,6 +464,34 @@ "type":"list", "member":{"shape":"DomainSummary"} }, + "FrameworkItem":{ + "type":"string", + "max":250, + "min":3 + }, + "FrameworkMappingDetails":{ + "type":"structure", + "required":[ + "Name", + "Item" + ], + "members":{ + "Name":{ + "shape":"FrameworkName", + "documentation":"

    The name of the compliance framework that the control maps to.

    " + }, + "Item":{ + "shape":"FrameworkItem", + "documentation":"

    The specific item or requirement within the framework that the control maps to.

    " + } + }, + "documentation":"

    A structure that contains details about a framework mapping, including the framework name and specific item within the framework that the control maps to.

    " + }, + "FrameworkName":{ + "type":"string", + "max":250, + "min":3 + }, "GetControlRequest":{ "type":"structure", "required":["ControlArn"], @@ -377,6 +516,10 @@ "shape":"ControlArn", "documentation":"

    The Amazon Resource Name (ARN) of the control.

    " }, + "Aliases":{ + "shape":"ControlAliases", + "documentation":"

    A list of alternative identifiers for the control. These are human-readable designators, such as SH.S3.1. Several aliases can refer to the same control across different Amazon Web Services services or compliance frameworks.

    " + }, "Name":{ "shape":"String", "documentation":"

    The display name of the control.

    " @@ -405,9 +548,21 @@ "CreateTime":{ "shape":"Timestamp", "documentation":"

    A timestamp that notes the time when the control was released (start of its life) as a governance capability in Amazon Web Services.

    " + }, + "GovernedResources":{ + "shape":"GovernedResources", + "documentation":"

    A list of Amazon Web Services resource types that are governed by this control. This information helps you understand which controls can govern certain types of resources, and conversely, which resources are affected when the control is implemented. The resources are represented as Amazon Web Services CloudFormation resource types. If GovernedResources cannot be represented by available CloudFormation resource types, it’s returned as an empty list.

    " } } }, + "GovernedResource":{ + "type":"string", + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + }, + "GovernedResources":{ + "type":"list", + "member":{"shape":"GovernedResource"} + }, "ImplementationDetails":{ "type":"structure", "required":["Type"], @@ -423,11 +578,31 @@ }, "documentation":"

    An object that describes the implementation type for a control.

    Our ImplementationDetails Type format has three required segments:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME

    For example, AWS::Config::ConfigRule or AWS::SecurityHub::SecurityControl resources have the format with three required segments.

    Our ImplementationDetails Type format has an optional fourth segment, which is present for applicable implementation types. The format is as follows:

    • SERVICE-PROVIDER::SERVICE-NAME::RESOURCE-NAME::RESOURCE-TYPE-DESCRIPTION

    For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or AWS::CloudFormation::Type::HOOK have the format with four segments.

    Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values.

    " }, + "ImplementationFilter":{ + "type":"structure", + "members":{ + "Types":{ + "shape":"ImplementationTypeFilterList", + "documentation":"

    A list of implementation types that can serve as filters. For example, you can filter for controls implemented as Amazon Web Services Config Rules by specifying AWS::Config::ConfigRule as a type.

    " + }, + "Identifiers":{ + "shape":"ImplementationIdentifierFilterList", + "documentation":"

    A list of service-specific identifiers that can serve as filters. For example, you can filter for controls with specific Amazon Web Services Config Rule IDs or Security Hub Control IDs.

    " + } + }, + "documentation":"

    A structure that defines filtering criteria for control implementations. You can use this filter to find controls that are implemented by specific Amazon Web Services services or with specific service identifiers.

    " + }, "ImplementationIdentifier":{ "type":"string", "max":256, "min":1, - "pattern":"[a-z0-9-]+" + "pattern":"[a-zA-Z0-9_\\.-]+" + }, + "ImplementationIdentifierFilterList":{ + "type":"list", + "member":{"shape":"ImplementationIdentifier"}, + "max":1, + "min":1 }, "ImplementationSummary":{ "type":"structure", @@ -450,6 +625,12 @@ "min":7, "pattern":"[A-Za-z0-9]+(::[A-Za-z0-9_]+){2,3}" }, + "ImplementationTypeFilterList":{ + "type":"list", + "member":{"shape":"ImplementationType"}, + "max":1, + "min":1 + }, "InternalServerException":{ "type":"structure", "members":{ @@ -478,7 +659,7 @@ }, "CommonControlFilter":{ "shape":"CommonControlFilter", - "documentation":"

    An optional filter that narrows the results to a specific objective.

    This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t currently supported.

    " + "documentation":"

    An optional filter that narrows the results to a specific objective.

    This filter allows you to specify one objective ARN at a time. Passing multiple ARNs in the CommonControlFilter isn’t supported.

    " } } }, @@ -496,6 +677,41 @@ } } }, + "ListControlMappingsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    The pagination token that's used to fetch the next set of results.

    ", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxListControlMappingsResults", + "documentation":"

    The maximum number of results on a page or for an API request call.

    ", + "location":"querystring", + "locationName":"maxResults" + }, + "Filter":{ + "shape":"ControlMappingFilter", + "documentation":"

    An optional filter that narrows the results to specific control mappings based on control ARNs, common control ARNs, or mapping types.

    " + } + } + }, + "ListControlMappingsResponse":{ + "type":"structure", + "required":["ControlMappings"], + "members":{ + "ControlMappings":{ + "shape":"ControlMappings", + "documentation":"

    The list of control mappings that the ListControlMappings API returns.

    " + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

    The pagination token that's used to fetch the next set of results.

    " + } + } + }, "ListControlsRequest":{ "type":"structure", "members":{ @@ -510,6 +726,10 @@ "documentation":"

    The maximum number of results on a page or for an API request call.

    ", "location":"querystring", "locationName":"maxResults" + }, + "Filter":{ + "shape":"ControlFilter", + "documentation":"

    An optional filter that narrows the results to controls with specific implementation types or identifiers. If you don't provide a filter, the operation returns all available controls.

    " } } }, @@ -575,7 +795,7 @@ }, "ObjectiveFilter":{ "shape":"ObjectiveFilter", - "documentation":"

    An optional filter that narrows the results to a specific domain.

    This filter allows you to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t currently supported.

    " + "documentation":"

    An optional filter that narrows the results to a specific domain.

    This filter allows you to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t supported.

    " } } }, @@ -593,12 +813,46 @@ } } }, + "Mapping":{ + "type":"structure", + "members":{ + "Framework":{ + "shape":"FrameworkMappingDetails", + "documentation":"

    The framework mapping details when the mapping type relates to a compliance framework.

    " + }, + "CommonControl":{ + "shape":"CommonControlMappingDetails", + "documentation":"

    The common control mapping details when the mapping type relates to a common control.

    " + } + }, + "documentation":"

    A structure that contains the details of a mapping relationship, which can be either to a framework or to a common control.

    ", + "union":true + }, + "MappingType":{ + "type":"string", + "enum":[ + "FRAMEWORK", + "COMMON_CONTROL" + ] + }, + "MappingTypeFilterList":{ + "type":"list", + "member":{"shape":"MappingType"}, + "max":1, + "min":1 + }, "MaxListCommonControlsResults":{ "type":"integer", "box":true, "max":100, "min":1 }, + "MaxListControlMappingsResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, "MaxListControlsResults":{ "type":"integer", "box":true, @@ -628,7 +882,7 @@ "members":{ "Domains":{ "shape":"DomainResourceFilterList", - "documentation":"

    The domain that's used as filter criteria.

    You can use this parameter to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t currently supported.

    " + "documentation":"

    The domain that's used as filter criteria.

    You can use this parameter to specify one domain ARN at a time. Passing multiple ARNs in the ObjectiveFilter isn’t supported.

    " } }, "documentation":"

    An optional filter that narrows the list of objectives to a specific domain.

    " @@ -711,7 +965,7 @@ "documentation":"

    Regions in which the control is available to be deployed.

    " } }, - "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control currently is available for deployment. For more information about scope, see Global services.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A,B,and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

    " + "documentation":"

    Returns information about the control, including the scope of the control, if enabled, and the Regions in which the control is available for deployment. For more information about scope, see Global services.

    If you are applying controls through an Amazon Web Services Control Tower landing zone environment, remember that the values returned in the RegionConfiguration API operation are not related to the governed Regions in your landing zone. For example, if you are governing Regions A, B, and C while the control is available in Regions A, B, C, and D, you'd see a response with DeployableRegions of A, B, C, and D for a control with REGIONAL scope, even though you may not intend to deploy the control in Region D, because you do not govern it through your landing zone.

    " }, "ResourceNotFoundException":{ "type":"structure", @@ -753,5 +1007,5 @@ "exception":true } }, - "documentation":"

    Welcome to the Amazon Web Services Control Catalog API reference. This guide is for developers who need detailed information about how to programmatically identify and filter the common controls and related metadata that are available to Amazon Web Services customers. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types that are supported by Amazon Web Services Control Catalog.

    Use the following links to get started with the Amazon Web Services Control Catalog API:

    • Actions: An alphabetical list of all Control Catalog API operations.

    • Data types: An alphabetical list of all Control Catalog data types.

    • Common parameters: Parameters that all operations can use.

    • Common errors: Client and server errors that all operations can return.

    " + "documentation":"

    Welcome to the Control Catalog API reference. This guide is for developers who need detailed information about how to programmatically identify and filter the common controls and related metadata that are available to Amazon Web Services customers. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types that are supported by Control Catalog.

    Use the following links to get started with the Control Catalog API:

    • Actions: An alphabetical list of all Control Catalog API operations.

    • Data types: An alphabetical list of all Control Catalog data types.

    • Common parameters: Parameters that all operations can use.

    • Common errors: Client and server errors that all operations can return.

    " } diff --git a/services/controltower/pom.xml b/services/controltower/pom.xml index 5d8d5801a75c..d1849c689704 100644 --- a/services/controltower/pom.xml +++ b/services/controltower/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT controltower AWS Java SDK :: Services :: Control Tower diff --git a/services/controltower/src/main/resources/codegen-resources/customization.config b/services/controltower/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/controltower/src/main/resources/codegen-resources/customization.config +++ b/services/controltower/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/controltower/src/main/resources/codegen-resources/service-2.json b/services/controltower/src/main/resources/codegen-resources/service-2.json index e14e24a11360..8f4a69eba540 100644 --- a/services/controltower/src/main/resources/codegen-resources/service-2.json +++ b/services/controltower/src/main/resources/codegen-resources/service-2.json @@ -24,8 +24,8 @@ "input":{"shape":"CreateLandingZoneInput"}, "output":{"shape":"CreateLandingZoneOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"} @@ -42,12 +42,12 @@ "input":{"shape":"DeleteLandingZoneInput"}, "output":{"shape":"DeleteLandingZoneOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + 
{"shape":"ThrottlingException"} ], "documentation":"

    Decommissions a landing zone. This API call starts an asynchronous operation that deletes Amazon Web Services Control Tower resources deployed in accounts managed by Amazon Web Services Control Tower.

    ", "idempotent":true @@ -62,13 +62,13 @@ "input":{"shape":"DisableBaselineInput"}, "output":{"shape":"DisableBaselineOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline. For usage examples, see the Amazon Web Services Control Tower User Guide .

    ", "idempotent":true @@ -83,13 +83,13 @@ "input":{"shape":"DisableControlInput"}, "output":{"shape":"DisableControlOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Controls Reference Guide .

    " }, @@ -103,13 +103,13 @@ "input":{"shape":"EnableBaselineInput"}, "output":{"shape":"EnableBaselineOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, @@ -123,13 +123,13 @@ "input":{"shape":"EnableControlInput"}, "output":{"shape":"EnableControlOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Controls Reference Guide .

    " }, @@ -146,8 +146,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Retrieve details about an existing Baseline resource by specifying its identifier. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, @@ -164,8 +164,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, @@ -182,8 +182,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Controls Reference Guide .

    " }, @@ -200,8 +200,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Retrieve details of an EnabledBaseline resource by specifying its identifier.

    " }, @@ -218,8 +218,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Retrieves details about an enabled control. For usage examples, see the Controls Reference Guide .

    " }, @@ -236,8 +236,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns details about the landing zone. Displays a message in case of error.

    " }, @@ -254,8 +254,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Returns the status of the specified landing zone operation. Details for an operation are available for 90 days.

    " }, @@ -323,8 +323,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Controls Reference Guide .

    " }, @@ -388,13 +388,13 @@ "input":{"shape":"ResetEnabledBaselineInput"}, "output":{"shape":"ResetEnabledBaselineOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, @@ -408,13 +408,13 @@ "input":{"shape":"ResetEnabledControlInput"}, "output":{"shape":"ResetEnabledControlOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Resets an enabled control.

    " }, @@ -428,12 +428,12 @@ "input":{"shape":"ResetLandingZoneInput"}, "output":{"shape":"ResetLandingZoneOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    This API call resets a landing zone. It starts an asynchronous operation that resets the landing zone to the parameters specified in the original configuration, which you specified in the manifest file. Nothing in the manifest file's original landing zone configuration is changed during the reset process, by default. This API is not the same as a rollback of a landing zone version, which is not a supported operation.

    " }, @@ -479,13 +479,13 @@ "input":{"shape":"UpdateEnabledBaselineInput"}, "output":{"shape":"UpdateEnabledBaselineOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Updates an EnabledBaseline resource's applied parameters or version. For usage examples, see the Amazon Web Services Control Tower User Guide .

    " }, @@ -499,13 +499,13 @@ "input":{"shape":"UpdateEnabledControlInput"}, "output":{"shape":"UpdateEnabledControlOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    Updates the configuration of an already enabled control.

    If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request.

    If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply.

    If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling the ResetEnabledControl API. Alternatively, you can call DisableControl and then call EnableControl again. Also, you can run an extending governance operation to repair drift. For usage examples, see the Controls Reference Guide .

    " }, @@ -519,12 +519,12 @@ "input":{"shape":"UpdateLandingZoneInput"}, "output":{"shape":"UpdateLandingZoneOutput"}, "errors":[ - {"shape":"ValidationException"}, {"shape":"ConflictException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} ], "documentation":"

    This API call updates the landing zone. It starts an asynchronous operation that updates the landing zone based on the new landing zone version, or on the changed parameters specified in the updated manifest file.

    " } @@ -1053,6 +1053,10 @@ "shape":"String", "documentation":"

    The enabled version of the Baseline.

    " }, + "driftStatusSummary":{ + "shape":"EnabledBaselineDriftStatusSummary", + "documentation":"

    The drift status of the enabled baseline.

    " + }, "parameters":{ "shape":"EnabledBaselineParameterSummaries", "documentation":"

    Shows the parameters that are applied when enabling this Baseline.

    " @@ -1069,6 +1073,45 @@ }, "documentation":"

    Details of the EnabledBaseline resource.

    " }, + "EnabledBaselineDriftStatus":{ + "type":"string", + "enum":[ + "IN_SYNC", + "DRIFTED" + ] + }, + "EnabledBaselineDriftStatusSummary":{ + "type":"structure", + "members":{ + "types":{ + "shape":"EnabledBaselineDriftTypes", + "documentation":"

    The types of drift that can be detected for an enabled baseline. Amazon Web Services Control Tower detects inheritance drift on enabled baselines that apply at the OU level.

    " + } + }, + "documentation":"

    The drift summary of the enabled baseline. Amazon Web Services Control Tower reports inheritance drift when an enabled baseline configuration of a member account is different from the configuration that applies to the OU. Amazon Web Services Control Tower reports this type of drift for a parent or child enabled baseline. One way to repair this drift is by resetting the parent enabled baseline, on the OU.

    For example, you may see this type of drift if you move accounts between OUs, but the accounts are not yet (re-)enrolled.

    " + }, + "EnabledBaselineDriftStatuses":{ + "type":"list", + "member":{"shape":"EnabledBaselineDriftStatus"}, + "max":1, + "min":1 + }, + "EnabledBaselineDriftTypes":{ + "type":"structure", + "members":{ + "inheritance":{ + "shape":"EnabledBaselineInheritanceDrift", + "documentation":"

    At least one account within the target OU does not match the baseline configuration defined on that OU. An account is in inheritance drift when it does not match the configuration of a parent OU, possibly a new parent OU, if the account is moved.

    " + } + }, + "documentation":"

    The types of drift that can be detected for an enabled baseline.

    • Amazon Web Services Control Tower detects inheritance drift on the enabled baselines that target OUs: AWSControlTowerBaseline and BackupBaseline.

    • Amazon Web Services Control Tower does not detect drift on the baselines that apply to your landing zone: IdentityCenterBaseline, AuditBaseline, LogArchiveBaseline, BackupCentralVaultBaseline, or BackupAdminBaseline. For more information, see Types of baselines.

    Baselines enabled on an OU are inherited by its member accounts as child EnabledBaseline resources. The baseline on the OU serves as the parent EnabledBaseline, which governs the configuration of each child EnabledBaseline.

    If the baseline configuration of a member account in an OU does not match the configuration of the parent OU, the parent and child baseline is in a state of inheritance drift. This drift could occur in the AWSControlTowerBaseline or the BackupBaseline related to that account.

    " + }, + "EnabledBaselineEnablementStatuses":{ + "type":"list", + "member":{"shape":"EnablementStatus"}, + "max":1, + "min":1 + }, "EnabledBaselineFilter":{ "type":"structure", "members":{ @@ -1076,10 +1119,18 @@ "shape":"EnabledBaselineBaselineIdentifiers", "documentation":"

    Identifiers for the Baseline objects returned as part of the filter operation.

    " }, + "inheritanceDriftStatuses":{ + "shape":"EnabledBaselineDriftStatuses", + "documentation":"

    A list of EnabledBaselineDriftStatus items for enabled baselines.

    " + }, "parentIdentifiers":{ "shape":"EnabledBaselineParentIdentifiers", "documentation":"

    An optional filter that sets up a list of parentIdentifiers to filter the results of the ListEnabledBaseline output.

    " }, + "statuses":{ + "shape":"EnabledBaselineEnablementStatuses", + "documentation":"

    A list of EnablementStatus items.

    " + }, "targetIdentifiers":{ "shape":"EnabledBaselineTargetIdentifiers", "documentation":"

    Identifiers for the targets of the Baseline filter operation.

    " @@ -1087,6 +1138,16 @@ }, "documentation":"

    A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both.

    " }, + "EnabledBaselineInheritanceDrift":{ + "type":"structure", + "members":{ + "status":{ + "shape":"EnabledBaselineDriftStatus", + "documentation":"

    The inheritance drift status for enabled baselines.

    " + } + }, + "documentation":"

    The inheritance drift summary for the enabled baseline. Inheritance drift occurs when any accounts in the target OU do not match the baseline configuration defined on that OU.

    " + }, "EnabledBaselineParameter":{ "type":"structure", "required":[ @@ -1164,6 +1225,10 @@ "shape":"String", "documentation":"

    The enabled version of the baseline.

    " }, + "driftStatusSummary":{ + "shape":"EnabledBaselineDriftStatusSummary", + "documentation":"

    The drift status of the enabled baseline.

    " + }, "parentIdentifier":{ "shape":"Arn", "documentation":"

    An ARN that represents an object returned by ListEnabledBaseline, to describe an enabled baseline.

    " @@ -2046,7 +2111,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

    The request would cause a service quota to be exceeded. The limit is 10 concurrent operations.

    ", + "documentation":"

    The request would cause a service quota to be exceeded. The limit is 100 concurrent operations.

    ", "error":{ "httpStatusCode":402, "senderFault":true @@ -2283,5 +2348,5 @@ "exception":true } }, - "documentation":"

    Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:

    For more information about these types of resources, see the Amazon Web Services Control Tower User Guide .

    About control APIs

    These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

    To call these APIs, you'll need to know:

    • the controlIdentifier for the control--or guardrail--you are targeting.

    • the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

    • the ARN associated with a resource that you wish to tag or untag.

    To get the controlIdentifier for your Amazon Web Services Control Tower control:

    The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

    About identifiers for Amazon Web Services Control Tower

    The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.

    A quick-reference list of control identifers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide . Remember that Mandatory controls cannot be added or removed.

    Some controls have two identifiers

    • ARN format for Amazon Web Services Control Tower: arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}

      Example:

      arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED

    • ARN format for Amazon Web Services Control Catalog: arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}

    You can find the {CONTROL_CATALOG_OPAQUE_ID} in the Amazon Web Services Control Tower Controls Reference Guide , or in the Amazon Web Services Control Tower console, on the Control details page.

    The Amazon Web Services Control Tower APIs for enabled controls, such as GetEnabledControl and ListEnabledControls always return an ARN of the same type given when the control was enabled.

    To get the targetIdentifier:

    The targetIdentifier is the ARN for an OU.

    In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

    OU ARN format:

    arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}

    About landing zone APIs

    You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.

    For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.

    About baseline APIs

    You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.

    You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.

    The individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.

    About Amazon Web Services Control Catalog identifiers

    • The EnableControl and DisableControl API operations can be called by specifying either the Amazon Web Services Control Tower identifer or the Amazon Web Services Control Catalog identifier. The API response returns the same type of identifier that you specified when calling the API.

    • If you use an Amazon Web Services Control Tower identifier to call the EnableControl API, and then call EnableControl again with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control Tower returns an error message stating that the control is already enabled. Similar behavior applies to the DisableControl API operation.

    • Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only.

    Details and examples

    To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower

    Recording API Requests

    Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide.

    " + "documentation":"

    Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:

    For more information about these types of resources, see the Amazon Web Services Control Tower User Guide .

    About control APIs

    These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

    To call these APIs, you'll need to know:

    • the controlIdentifier for the control--or guardrail--you are targeting.

    • the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

    • the ARN associated with a resource that you wish to tag or untag.

    To get the controlIdentifier for your Amazon Web Services Control Tower control:

    The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

    About identifiers for Amazon Web Services Control Tower

    The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.

    A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide. Remember that Mandatory controls cannot be added or removed.

    Some controls have two identifiers

    • ARN format for Amazon Web Services Control Tower: arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID}

      Example:

      arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED

    • ARN format for Amazon Web Services Control Catalog: arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID}

    You can find the {CONTROL_CATALOG_OPAQUE_ID} in the Amazon Web Services Control Tower Controls Reference Guide , or in the Amazon Web Services Control Tower console, on the Control details page.

    The Amazon Web Services Control Tower APIs for enabled controls, such as GetEnabledControl and ListEnabledControls always return an ARN of the same type given when the control was enabled.

    To get the targetIdentifier:

    The targetIdentifier is the ARN for an OU.

    In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

    OU ARN format:

    arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}

    About landing zone APIs

    You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.

    For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.

    About baseline APIs

    You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.

    You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.

    The individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.

    About Amazon Web Services Control Catalog identifiers

    • The EnableControl and DisableControl API operations can be called by specifying either the Amazon Web Services Control Tower identifier or the Amazon Web Services Control Catalog identifier. The API response returns the same type of identifier that you specified when calling the API.

    • If you use an Amazon Web Services Control Tower identifier to call the EnableControl API, and then call EnableControl again with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control Tower returns an error message stating that the control is already enabled. Similar behavior applies to the DisableControl API operation.

    • Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only.

    Details and examples

    To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower

    Recording API Requests

    Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide.

    " } diff --git a/services/costandusagereport/pom.xml b/services/costandusagereport/pom.xml index c5a24cd03e60..0595c98e9816 100644 --- a/services/costandusagereport/pom.xml +++ b/services/costandusagereport/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT costandusagereport AWS Java SDK :: Services :: AWS Cost and Usage Report diff --git a/services/costandusagereport/src/main/resources/codegen-resources/customization.config b/services/costandusagereport/src/main/resources/codegen-resources/customization.config index 19839ac2e98f..c45786626403 100644 --- a/services/costandusagereport/src/main/resources/codegen-resources/customization.config +++ b/services/costandusagereport/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "deleteReportDefinition" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/costexplorer/pom.xml b/services/costexplorer/pom.xml index ae03266fd875..f83dca7f85c3 100644 --- a/services/costexplorer/pom.xml +++ b/services/costexplorer/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 costexplorer diff --git a/services/costexplorer/src/main/resources/codegen-resources/customization.config b/services/costexplorer/src/main/resources/codegen-resources/customization.config index 61775c3dbe4f..604d181bf4fe 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/customization.config +++ b/services/costexplorer/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "getCostAndUsage" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json 
b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json index 12e77aabd32b..6e774dcfc1d4 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -57,564 +57,616 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", + 
"fn": "aws.partition", "argv": [ { - "fn": "getAttr", + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] }, - "name" + "aws" ] }, - "aws" - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true + ] + } + ], + "endpoint": { + "url": "https://ce.us-east-1.api.aws", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } ] }, - "aws-cn" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "cn-northwest-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ { - "fn": "getAttr", + 
"fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true + ] + } + ], + "endpoint": { + "url": "https://ce.cn-northwest-1.api.amazonwebservices.com.cn", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } ] }, - "aws-iso" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-iso-east-1.c2s.ic.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-iso-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-iso-east-1.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } ] }, - "aws-iso-b" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-isob-east-1.sc2s.sgov.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - 
"signingName": "ce", - "signingRegion": "us-isob-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.us-isob-east-1.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } ] }, - "aws-iso-e" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-e" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "eu-isoe-west-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + false + ] + } + ], + "endpoint": { + "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } ] }, - "aws-iso-f" - ] + "headers": {} + }, + "type": "endpoint" }, { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-f" + ] }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": 
[ + { + "ref": "UseFIPS" + }, + false + ] }, - false - ] - } - ], - "endpoint": { - "url": "https://ce.us-isof-south-1.csp.hci.ic.gov", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-isof-south-1" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" + ], + "endpoint": { + "url": "https://ce.us-isof-south-1.csp.hci.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] }, - true - ] + "headers": {} + }, + "type": "endpoint" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": 
"https://ce-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseFIPS" }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://ce-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - true - ] - } - ], - 
"rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ce.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://ce.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://ce.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git 
a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json index e57583a59213..39c2631147c5 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/costexplorer/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,31 +1,50 @@ { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-east-1" - } - ] - }, - "url": "https://ce.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://ce-fips.us-east-1.api.aws" } }, @@ -39,6 +58,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": 
"https://ce-fips.us-east-1.amazonaws.com" } }, @@ -52,6 +79,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://ce.us-east-1.api.aws" } }, @@ -69,7 +104,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-east-1" } ] @@ -84,75 +118,76 @@ } }, { - "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "cn-northwest-1" } ] }, - "url": "https://ce.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "aws-cn-global", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://ce-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://ce-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ce-fips.cn-north-1.amazonaws.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://ce-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS 
disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://ce.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "cn-northwest-1" } ] @@ -161,81 +196,91 @@ } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://ce-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS 
disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://ce.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://ce.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://ce.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region aws-iso-global with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-iso-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://ce.us-iso-east-1.c2s.ic.gov" + "url": "https://ce.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "aws-iso-global", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -255,6 +300,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://ce-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -283,7 +336,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-iso-east-1" } ] @@ -297,28 +349,6 @@ "UseDualStack": false } }, - { - "documentation": "For region aws-iso-b-global with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "ce", - 
"signingRegion": "us-isob-east-1" - } - ] - }, - "url": "https://ce.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "aws-iso-b-global", - "UseFIPS": false, - "UseDualStack": false - } - }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -334,6 +364,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://ce-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -362,7 +400,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "us-isob-east-1" } ] @@ -377,98 +414,131 @@ } }, { - "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", "signingRegion": "eu-isoe-west-1" } ] }, - "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk" + "url": "https://ce-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { "Region": "eu-isoe-west-1", - "UseFIPS": false, + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For 
region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "ce", - "signingRegion": "us-isof-south-1" + "signingRegion": "eu-isoe-west-1" } ] }, - "url": "https://ce.us-isof-south-1.csp.hci.ic.gov" + "url": "https://ce.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-isof-south-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://example.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://ce-fips.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": 
"us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://ce.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json b/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json index 5f4362b57f37..7c7eb23b0d7b 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json +++ b/services/costexplorer/src/main/resources/codegen-resources/paginators-1.json @@ -18,6 +18,18 @@ "limit_key": "MaxResults", "result_key": "AnomalySubscriptions" }, + "GetCostAndUsageComparisons": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": "MaxResults", + "result_key": "CostAndUsageComparisons" + }, + "GetCostComparisonDrivers": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": "MaxResults", + "result_key": "CostComparisonDrivers" + }, "GetSavingsPlansCoverage": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/costexplorer/src/main/resources/codegen-resources/service-2.json b/services/costexplorer/src/main/resources/codegen-resources/service-2.json index cbb41a5e2cd6..c72f631eb61d 100644 --- a/services/costexplorer/src/main/resources/codegen-resources/service-2.json +++ 
b/services/costexplorer/src/main/resources/codegen-resources/service-2.json @@ -204,6 +204,22 @@ ], "documentation":"

    Retrieves cost and usage metrics for your account. You can specify which cost and usage-related metric you want the request to return. For example, you can specify BlendedCosts or UsageQuantity. You can also filter and group your data by various dimensions, such as SERVICE or AZ, in a specific time range. For a complete list of valid dimensions, see the GetDimensionValues operation. Management accounts in an organization in Organizations have access to all member accounts.

    For information about filter limitations, see Quotas and restrictions in the Billing and Cost Management User Guide.

    " }, + "GetCostAndUsageComparisons":{ + "name":"GetCostAndUsageComparisons", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCostAndUsageComparisonsRequest"}, + "output":{"shape":"GetCostAndUsageComparisonsResponse"}, + "errors":[ + {"shape":"DataUnavailableException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves cost and usage comparisons for your account between two periods within the last 13 months. If you have enabled multi-year data at monthly granularity, you can go back up to 38 months.

    " + }, "GetCostAndUsageWithResources":{ "name":"GetCostAndUsageWithResources", "http":{ @@ -240,6 +256,22 @@ ], "documentation":"

    Retrieves an array of Cost Category names and values that incurred cost.

    If some Cost Category names and values are not associated with any cost, they will not be returned by this API.

    " }, + "GetCostComparisonDrivers":{ + "name":"GetCostComparisonDrivers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCostComparisonDriversRequest"}, + "output":{"shape":"GetCostComparisonDriversResponse"}, + "errors":[ + {"shape":"DataUnavailableException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Retrieves key factors driving cost changes between two time periods within the last 13 months, such as usage changes, discount changes, and commitment-based savings. If you have enabled multi-year data at monthly granularity, you can go back up to 38 months.

    " + }, "GetCostForecast":{ "name":"GetCostForecast", "http":{ @@ -1023,6 +1055,33 @@ }, "documentation":"

    The configuration for the commitment purchase analysis.

    " }, + "ComparisonMetricValue":{ + "type":"structure", + "members":{ + "BaselineTimePeriodAmount":{ + "shape":"GenericString", + "documentation":"

    The numeric value for the baseline time period measurement.

    " + }, + "ComparisonTimePeriodAmount":{ + "shape":"GenericString", + "documentation":"

    The numeric value for the comparison time period measurement.

    " + }, + "Difference":{ + "shape":"GenericString", + "documentation":"

    The calculated difference between ComparisonTimePeriodAmount and BaselineTimePeriodAmount.

    " + }, + "Unit":{ + "shape":"GenericString", + "documentation":"

    The unit of measurement applicable to all numeric values in this comparison.

    " + } + }, + "documentation":"

    Contains cost or usage metric values for comparing two time periods. Each value includes amounts for the baseline and comparison time periods, their difference, and the unit of measurement.

    " + }, + "ComparisonMetrics":{ + "type":"map", + "key":{"shape":"MetricName"}, + "value":{"shape":"ComparisonMetricValue"} + }, "Context":{ "type":"string", "enum":[ @@ -1157,6 +1216,27 @@ "max":1000, "min":1 }, + "CostAndUsageComparison":{ + "type":"structure", + "members":{ + "CostAndUsageSelector":{"shape":"Expression"}, + "Metrics":{ + "shape":"ComparisonMetrics", + "documentation":"

    A mapping of metric names to their comparison values.

    " + } + }, + "documentation":"

    Represents a comparison of cost and usage metrics between two time periods.

    " + }, + "CostAndUsageComparisons":{ + "type":"list", + "member":{"shape":"CostAndUsageComparison"} + }, + "CostAndUsageComparisonsMaxResults":{ + "type":"integer", + "box":true, + "max":2000, + "min":1 + }, "CostCategory":{ "type":"structure", "required":[ @@ -1443,6 +1523,53 @@ "type":"list", "member":{"shape":"CostCategoryValue"} }, + "CostComparisonDriver":{ + "type":"structure", + "members":{ + "CostSelector":{"shape":"Expression"}, + "Metrics":{ + "shape":"ComparisonMetrics", + "documentation":"

    A mapping of metric names to their comparison values.

    " + }, + "CostDrivers":{ + "shape":"CostDrivers", + "documentation":"

    An array of cost drivers, each representing a cost difference between the baseline and comparison time periods. Each entry also includes a metric delta (for example, usage change) that contributed to the cost variance, along with the identifier and type of change.

    " + } + }, + "documentation":"

    Represents a collection of cost drivers and their associated metrics for cost comparison analysis.

    " + }, + "CostComparisonDrivers":{ + "type":"list", + "member":{"shape":"CostComparisonDriver"} + }, + "CostComparisonDriversMaxResults":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "CostDriver":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"GenericString", + "documentation":"

    The category or classification of the cost driver.

    Values include: BUNDLED_DISCOUNT, CREDIT, OUT_OF_CYCLE_CHARGE, REFUND, RECURRING_RESERVATION_FEE, RESERVATION_USAGE, RI_VOLUME_DISCOUNT, SAVINGS_PLAN_USAGE, SAVINGS_PLAN_NEGATION, SAVINGS_PLAN_RECURRING_FEE, SUPPORT_FEE, TAX, UPFRONT_RESERVATION_FEE, USAGE_CHANGE, COMMITMENT

    " + }, + "Name":{ + "shape":"GenericString", + "documentation":"

    The specific identifier of the cost driver.

    " + }, + "Metrics":{ + "shape":"ComparisonMetrics", + "documentation":"

    A mapping of metric names to their comparison values, measuring the impact of this cost driver.

    " + } + }, + "documentation":"

    Represents factors that contribute to cost variations between the baseline and comparison time periods, including the type of driver, an identifier of the driver, and associated metrics.

    " + }, + "CostDrivers":{ + "type":"list", + "member":{"shape":"CostDriver"} + }, "Coverage":{ "type":"structure", "members":{ @@ -1717,8 +1844,7 @@ }, "DeleteAnomalyMonitorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAnomalySubscriptionRequest":{ "type":"structure", @@ -1732,8 +1858,7 @@ }, "DeleteAnomalySubscriptionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteCostCategoryDefinitionRequest":{ "type":"structure", @@ -2444,6 +2569,63 @@ } } }, + "GetCostAndUsageComparisonsRequest":{ + "type":"structure", + "required":[ + "BaselineTimePeriod", + "ComparisonTimePeriod", + "MetricForComparison" + ], + "members":{ + "BillingViewArn":{ + "shape":"BillingViewArn", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API.

    " + }, + "BaselineTimePeriod":{ + "shape":"DateInterval", + "documentation":"

    The reference time period for comparison. This time period serves as the baseline against which other cost and usage data will be compared. The interval must start and end on the first day of a month, with a duration of exactly one month.

    " + }, + "ComparisonTimePeriod":{ + "shape":"DateInterval", + "documentation":"

    The comparison time period for analysis. This time period's cost and usage data will be compared against the baseline time period. The interval must start and end on the first day of a month, with a duration of exactly one month.

    " + }, + "MetricForComparison":{ + "shape":"MetricName", + "documentation":"

    The cost and usage metric to compare. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.

    " + }, + "Filter":{"shape":"Expression"}, + "GroupBy":{ + "shape":"GroupDefinitions", + "documentation":"

    You can group results using the attributes DIMENSION, TAG, and COST_CATEGORY.

    " + }, + "MaxResults":{ + "shape":"CostAndUsageComparisonsMaxResults", + "documentation":"

    The maximum number of results that are returned for the request.

    ", + "box":true + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"

    The token to retrieve the next set of paginated results.

    " + } + } + }, + "GetCostAndUsageComparisonsResponse":{ + "type":"structure", + "members":{ + "CostAndUsageComparisons":{ + "shape":"CostAndUsageComparisons", + "documentation":"

    An array of comparison results showing cost and usage metrics between BaselineTimePeriod and ComparisonTimePeriod.

    " + }, + "TotalCostAndUsage":{ + "shape":"ComparisonMetrics", + "documentation":"

    A summary of the total cost and usage, comparing amounts between BaselineTimePeriod and ComparisonTimePeriod and their differences. This total represents the aggregate total across all paginated results, if the response spans multiple pages.

    " + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"

    The token to retrieve the next set of paginated results.

    " + } + } + }, "GetCostAndUsageRequest":{ "type":"structure", "required":[ @@ -2620,6 +2802,59 @@ } } }, + "GetCostComparisonDriversRequest":{ + "type":"structure", + "required":[ + "BaselineTimePeriod", + "ComparisonTimePeriod", + "MetricForComparison" + ], + "members":{ + "BillingViewArn":{ + "shape":"BillingViewArn", + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies a specific billing view. The ARN is used to specify which particular billing view you want to interact with or retrieve information from when making API calls related to Amazon Web Services Billing and Cost Management features. The BillingViewArn can be retrieved by calling the ListBillingViews API.

    " + }, + "BaselineTimePeriod":{ + "shape":"DateInterval", + "documentation":"

    The reference time period for comparison. This time period serves as the baseline against which other cost and usage data will be compared. The interval must start and end on the first day of a month, with a duration of exactly one month.

    " + }, + "ComparisonTimePeriod":{ + "shape":"DateInterval", + "documentation":"

    The comparison time period for analysis. This time period's cost and usage data will be compared against the baseline time period. The interval must start and end on the first day of a month, with a duration of exactly one month.

    " + }, + "MetricForComparison":{ + "shape":"MetricName", + "documentation":"

    The cost and usage metric to compare. Valid values are AmortizedCost, BlendedCost, NetAmortizedCost, NetUnblendedCost, NormalizedUsageAmount, UnblendedCost, and UsageQuantity.

    " + }, + "Filter":{"shape":"Expression"}, + "GroupBy":{ + "shape":"GroupDefinitions", + "documentation":"

    You can group results using the attributes DIMENSION, TAG, and COST_CATEGORY. Note that SERVICE and USAGE_TYPE dimensions are automatically included in the cost comparison drivers analysis.

    " + }, + "MaxResults":{ + "shape":"CostComparisonDriversMaxResults", + "documentation":"

    The maximum number of results that are returned for the request.

    ", + "box":true + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"

    The token to retrieve the next set of paginated results.

    " + } + } + }, + "GetCostComparisonDriversResponse":{ + "type":"structure", + "members":{ + "CostComparisonDrivers":{ + "shape":"CostComparisonDrivers", + "documentation":"

    An array of comparison results showing factors that drive significant cost differences between BaselineTimePeriod and ComparisonTimePeriod.

    " + }, + "NextPageToken":{ + "shape":"NextPageToken", + "documentation":"

    The token to retrieve the next set of paginated results.

    " + } + } + }, "GetCostForecastRequest":{ "type":"structure", "required":[ @@ -5332,8 +5567,7 @@ }, "StartSavingsPlansPurchaseRecommendationGenerationRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "StartSavingsPlansPurchaseRecommendationGenerationResponse":{ "type":"structure", @@ -5431,8 +5665,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValues":{ "type":"structure", @@ -5596,8 +5829,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UnusedHours":{"type":"string"}, "UnusedUnits":{"type":"string"}, diff --git a/services/costoptimizationhub/pom.xml b/services/costoptimizationhub/pom.xml index 49e53f622172..13e9c9699dd7 100644 --- a/services/costoptimizationhub/pom.xml +++ b/services/costoptimizationhub/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT costoptimizationhub AWS Java SDK :: Services :: Cost Optimization Hub diff --git a/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config b/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config +++ b/services/costoptimizationhub/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json index 6a7d2f52a745..e45fac809577 100644 --- a/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json +++ b/services/costoptimizationhub/src/main/resources/codegen-resources/service-2.json @@ -201,6 +201,27 @@ "LowestPrice" ] }, + 
"AuroraDbClusterStorage":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"AuroraDbClusterStorageConfiguration", + "documentation":"

    The Aurora DB cluster storage configuration used for recommendations.

    " + }, + "costCalculation":{"shape":"ResourceCostCalculation"} + }, + "documentation":"

    Contains the details of an Aurora DB cluster storage.

    " + }, + "AuroraDbClusterStorageConfiguration":{ + "type":"structure", + "members":{ + "storageType":{ + "shape":"String", + "documentation":"

    The storage type to associate with the Aurora DB cluster.

    " + } + }, + "documentation":"

    The Aurora DB cluster storage configuration used for recommendations.

    " + }, "BlockStoragePerformanceConfiguration":{ "type":"structure", "members":{ @@ -758,6 +779,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Retrieves the status of the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Retrieves the current preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, @@ -1094,7 +1119,7 @@ }, "costCalculation":{"shape":"ReservedInstancesCostCalculation"} }, - "documentation":"

    The MemoryDB reserved instances recommendation details.

    MemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in customer-facing documentation.

    " + "documentation":"

    The MemoryDB reserved instances recommendation details.

    While the API reference uses \"MemoryDB reserved instances\", the user guide and other documentation refer to them as \"MemoryDB reserved nodes\", as the terms are used interchangeably.

    " }, "MemoryDbReservedInstancesConfiguration":{ "type":"structure", @@ -1152,7 +1177,7 @@ "documentation":"

    Determines whether the recommendation is for a current generation instance.

    " } }, - "documentation":"

    The MemoryDB reserved instances configuration used for recommendations.

    MemoryDB reserved instances are referred to as \"MemoryDB reserved nodes\" in customer-facing documentation.

    " + "documentation":"

    The MemoryDB reserved instances configuration used for recommendations.

    While the API reference uses \"MemoryDB reserved instances\", the user guide and other documentation refer to them as \"MemoryDB reserved nodes\", as the terms are used interchangeably.

    " }, "MixedInstanceConfiguration":{ "type":"structure", @@ -1257,6 +1282,28 @@ }, "documentation":"

    Defines how rows will be sorted in the response.

    " }, + "PaymentOption":{ + "type":"string", + "enum":[ + "AllUpfront", + "PartialUpfront", + "NoUpfront" + ] + }, + "PreferredCommitment":{ + "type":"structure", + "members":{ + "term":{ + "shape":"Term", + "documentation":"

    The preferred length of the commitment period. If the value is null, it will default to ThreeYears (highest savings) where applicable.

    " + }, + "paymentOption":{ + "shape":"PaymentOption", + "documentation":"

    The preferred upfront payment structure for commitments. If the value is null, it will default to AllUpfront (highest savings) where applicable.

    " + } + }, + "documentation":"

    The preferred configuration for Reserved Instances and Savings Plans commitment-based discounts, consisting of a payment option and a commitment duration.

    " + }, "PrimitiveBoolean":{"type":"boolean"}, "RdsDbInstance":{ "type":"structure", @@ -1715,6 +1762,10 @@ "shape":"RdsDbInstanceStorage", "documentation":"

    The DB instance storage recommendation details.

    " }, + "auroraDbClusterStorage":{ + "shape":"AuroraDbClusterStorage", + "documentation":"

    The Aurora DB cluster storage recommendation details.

    " + }, "dynamoDbReservedCapacity":{ "shape":"DynamoDbReservedCapacity", "documentation":"

    The DynamoDB reserved capacity recommendation details.

    " @@ -1789,6 +1840,7 @@ "ElastiCacheReservedInstances", "RdsDbInstanceStorage", "RdsDbInstance", + "AuroraDbClusterStorage", "DynamoDbReservedCapacity", "MemoryDbReservedInstances" ] @@ -1936,6 +1988,13 @@ "max":100, "min":1 }, + "Term":{ + "type":"string", + "enum":[ + "OneYear", + "ThreeYears" + ] + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -1978,6 +2037,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Sets the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Sets the preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, @@ -1991,6 +2054,10 @@ "memberAccountDiscountVisibility":{ "shape":"MemberAccountDiscountVisibility", "documentation":"

    Shows the status of the \"member account discount visibility\" preference.

    " + }, + "preferredCommitment":{ + "shape":"PreferredCommitment", + "documentation":"

    Shows the updated preferences for how Reserved Instances and Savings Plans cost-saving opportunities are prioritized in terms of payment option and term length.

    " } } }, diff --git a/services/customerprofiles/pom.xml b/services/customerprofiles/pom.xml index 55ead6df999a..42abe9174fee 100644 --- a/services/customerprofiles/pom.xml +++ b/services/customerprofiles/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT customerprofiles AWS Java SDK :: Services :: Customer Profiles diff --git a/services/customerprofiles/src/main/resources/codegen-resources/customization.config b/services/customerprofiles/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/customization.config +++ b/services/customerprofiles/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json index ef6e528e5670..d8c945a0b63c 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "MaxResults", "result_key": "ProfileIds" }, + "ListDomainLayouts": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, "ListEventStreams": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json index 00a74a1066b7..a599196f6fac 100644 --- a/services/customerprofiles/src/main/resources/codegen-resources/service-2.json +++ b/services/customerprofiles/src/main/resources/codegen-resources/service-2.json @@ -100,6 
+100,23 @@ ], "documentation":"

    Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations.

    Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

    Use this API or UpdateDomain to enable identity resolution: set Matching to true.

    To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

    It is not possible to associate a Customer Profiles domain with an Amazon Connect Instance directly from the API. If you would like to create a domain and associate a Customer Profiles domain, use the Amazon Connect admin website. For more information, see Enable Customer Profiles.

    Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain.

    " }, + "CreateDomainLayout":{ + "name":"CreateDomainLayout", + "http":{ + "method":"POST", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"CreateDomainLayoutRequest"}, + "output":{"shape":"CreateDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Creates the layout to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "CreateEventStream":{ "name":"CreateEventStream", "http":{ @@ -255,6 +272,23 @@ ], "documentation":"

    Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.

    " }, + "DeleteDomainLayout":{ + "name":"DeleteDomainLayout", + "http":{ + "method":"DELETE", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"DeleteDomainLayoutRequest"}, + "output":{"shape":"DeleteDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Deletes the layout used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "DeleteEventStream":{ "name":"DeleteEventStream", "http":{ @@ -496,6 +530,23 @@ ], "documentation":"

    Returns information about a specific domain.

    " }, + "GetDomainLayout":{ + "name":"GetDomainLayout", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"GetDomainLayoutRequest"}, + "output":{"shape":"GetDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Gets the layout to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "GetEventStream":{ "name":"GetEventStream", "http":{ @@ -788,6 +839,23 @@ ], "documentation":"

    Retrieve a list of calculated attributes for a customer profile.

    " }, + "ListDomainLayouts":{ + "name":"ListDomainLayouts", + "http":{ + "method":"GET", + "requestUri":"/domains/{DomainName}/layouts" + }, + "input":{"shape":"ListDomainLayoutsRequest"}, + "output":{"shape":"ListDomainLayoutsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Lists the existing layouts that can be used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "ListDomains":{ "name":"ListDomains", "http":{ @@ -1173,6 +1241,23 @@ ], "documentation":"

    Updates the properties of a domain, including creating or selecting a dead letter queue or an encryption key.

    After a domain is created, the name can’t be changed.

    Use this API or CreateDomain to enable identity resolution: set Matching to true.

    To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.

    To add or remove tags on an existing Domain, see TagResource/UntagResource.

    " }, + "UpdateDomainLayout":{ + "name":"UpdateDomainLayout", + "http":{ + "method":"PUT", + "requestUri":"/domains/{DomainName}/layouts/{LayoutDefinitionName}" + }, + "input":{"shape":"UpdateDomainLayoutRequest"}, + "output":{"shape":"UpdateDomainLayoutResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

    Updates the layout used to view data for a specific domain. This API can only be invoked from the Amazon Connect admin website.

    " + }, "UpdateEventTrigger":{ "name":"UpdateEventTrigger", "http":{ @@ -1551,7 +1636,7 @@ "AttributeList":{ "type":"list", "member":{"shape":"AttributeItem"}, - "max":2, + "max":50, "min":1 }, "AttributeMap":{ @@ -1876,6 +1961,10 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " } }, "documentation":"

    The object containing the values of a single calculated attribute value.

    " @@ -2052,6 +2141,10 @@ "shape":"Statistic", "documentation":"

    The aggregation operation to perform for the calculated attribute.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -2097,9 +2190,124 @@ "shape":"timestamp", "documentation":"

    The timestamp of when the calculated attribute definition was most recently edited.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "CreateDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout.

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout.

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "CreateDomainLayoutResponse":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout", + "Version", + "CreatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under customer profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create layout.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " } } }, @@ -2388,7 +2596,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -2735,7 +2943,37 @@ }, "DeleteCalculatedAttributeDefinitionResponse":{ "type":"structure", + "members":{} + }, + "DeleteDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + } + } + }, + "DeleteDomainLayoutResponse":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"message", + "documentation":"

    A message that indicates the delete request is done.

    " + } } }, "DeleteDomainRequest":{ @@ -2783,8 +3021,7 @@ }, "DeleteEventStreamResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventTriggerRequest":{ "type":"structure", @@ -3035,8 +3272,7 @@ }, "DeleteWorkflowResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DestinationField":{ "type":"string", @@ -3174,11 +3410,7 @@ "max":3, "min":1 }, - "End":{ - "type":"integer", - "max":366, - "min":0 - }, + "End":{"type":"integer"}, "EstimateStatus":{ "type":"string", "enum":[ @@ -3815,6 +4047,18 @@ "shape":"AttributeDetails", "documentation":"

    Mathematical expression and a list of attribute items specified in that expression.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -3867,6 +4111,86 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " + } + } + }, + "GetDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + } + } + }, + "GetDomainLayoutResponse":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "Layout", + "Version", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create layout.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " } } }, @@ -4990,6 +5314,60 @@ "min":20, "pattern":"arn:aws:kms:.*:[0-9]+:.*" }, + "LayoutItem":{ + "type":"structure", + "required":[ + "LayoutDefinitionName", + "Description", + "DisplayName", + "LayoutType", + "CreatedAt", + "LastUpdatedAt" + ], + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then layout will not be used by default but it can be used to view data by explicit selection on UI.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under customer profiles domain.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + } + }, + "documentation":"

    The layout object that contains LayoutDefinitionName, Description, DisplayName, IsDefault, LayoutType, Tags, CreatedAt, LastUpdatedAt

    " + }, + "LayoutList":{ + "type":"list", + "member":{"shape":"LayoutItem"} + }, + "LayoutType":{ + "type":"string", + "enum":["PROFILE_EXPLORER"] + }, "ListAccountIntegrationsRequest":{ "type":"structure", "required":["Uri"], @@ -5054,6 +5432,14 @@ "shape":"timestamp", "documentation":"

    The timestamp of when the calculated attribute definition was most recently edited.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -5116,6 +5502,10 @@ "Value":{ "shape":"string1To255", "documentation":"

    The value of the calculated attribute.

    " + }, + "LastObjectTimestamp":{ + "shape":"timestamp", + "documentation":"

    The timestamp of the newest object included in the calculated attribute calculation.

    " } }, "documentation":"

    The details of a single calculated attribute for a profile.

    " @@ -5193,6 +5583,43 @@ }, "documentation":"

    An object in a list that represents a domain.

    " }, + "ListDomainLayoutsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.

    ", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"maxSize100", + "documentation":"

    The maximum number of objects returned per page.

    ", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDomainLayoutsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"LayoutList", + "documentation":"

    Contains summary information about the layouts in the domain.

    " + }, + "NextToken":{ + "shape":"token", + "documentation":"

    Identifies the next page of results to return.

    " + } + } + }, "ListDomainsRequest":{ "type":"structure", "members":{ @@ -6120,7 +6547,6 @@ }, "ObjectCount":{ "type":"integer", - "max":100, "min":1 }, "ObjectFilter":{ @@ -6283,7 +6709,7 @@ }, "AccountNumber":{ "shape":"sensitiveString1To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "AdditionalInformation":{ "shape":"sensitiveString1To1000", @@ -6911,10 +7337,6 @@ }, "Range":{ "type":"structure", - "required":[ - "Value", - "Unit" - ], "members":{ "Value":{ "shape":"Value", @@ -6923,6 +7345,18 @@ "Unit":{ "shape":"Unit", "documentation":"

    The unit of time.

    " + }, + "ValueRange":{ + "shape":"ValueRange", + "documentation":"

    A structure letting customers specify a relative time window over which data is included in the Calculated Attribute. Use positive numbers to indicate that the endpoint is in the past, and negative numbers to indicate it is in the future. ValueRange overrides Value.

    " + }, + "TimestampSource":{ + "shape":"string1To255", + "documentation":"

    An expression specifying the field in your JSON object from which the date should be parsed. The expression should follow the structure of \\\"{ObjectTypeName.<Location of timestamp field in JSON pointer format>}\\\". E.g. if your object type is MyType and source JSON is {\"generatedAt\": {\"timestamp\": \"1737587945945\"}}, then TimestampSource should be \"{MyType.generatedAt.timestamp}\".

    " + }, + "TimestampFormat":{ + "shape":"string1To255", + "documentation":"

    The format the timestamp field in your JSON object is specified. This value should be one of EPOCHMILLI (for Unix epoch timestamps with second/millisecond level precision) or ISO_8601 (following ISO_8601 format with second/millisecond level precision, with an optional offset of Z or in the format HH:MM or HHMM.). E.g. if your object type is MyType and source JSON is {\"generatedAt\": {\"timestamp\": \"2001-07-04T12:08:56.235-0700\"}}, then TimestampFormat should be \"ISO_8601\".

    " } }, "documentation":"

    The relative time period over which data is included in the aggregation.

    " @@ -6953,6 +7387,29 @@ "type":"string", "enum":["DAYS"] }, + "Readiness":{ + "type":"structure", + "members":{ + "ProgressPercentage":{ + "shape":"percentageInteger", + "documentation":"

    Approximately how far the Calculated Attribute creation is from completion.

    " + }, + "Message":{ + "shape":"text", + "documentation":"

    Any customer messaging.

    " + } + }, + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, + "ReadinessStatus":{ + "type":"string", + "enum":[ + "PREPARING", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -7472,6 +7929,15 @@ "CASE", "ORDER", "COMMUNICATION_RECORD", + "AIR_PREFERENCE", + "HOTEL_PREFERENCE", + "AIR_BOOKING", + "AIR_SEGMENT", + "HOTEL_RESERVATION", + "HOTEL_STAY_REVENUE", + "LOYALTY", + "LOYALTY_TRANSACTION", + "LOYALTY_PROMOTION", "UNIQUE", "SECONDARY", "LOOKUP_ONLY", @@ -7482,11 +7948,7 @@ "type":"list", "member":{"shape":"StandardIdentifier"} }, - "Start":{ - "type":"integer", - "max":366, - "min":1 - }, + "Start":{"type":"integer"}, "Statistic":{ "type":"string", "enum":[ @@ -7570,8 +8032,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7728,8 +8189,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAddress":{ "type":"structure", @@ -7852,6 +8312,104 @@ "shape":"AttributeDetails", "documentation":"

    The mathematical expression and a list of attribute items specified in that expression.

    " }, + "UseHistoricalData":{ + "shape":"optionalBoolean", + "documentation":"

    Whether historical data ingested before the Calculated Attribute was created should be included in calculations.

    " + }, + "Status":{ + "shape":"ReadinessStatus", + "documentation":"

    Status of the Calculated Attribute creation (whether all historical data has been indexed).

    " + }, + "Readiness":{ + "shape":"Readiness", + "documentation":"

    Information indicating if the Calculated Attribute is ready for use by confirming all historical data has been processed and reflected.

    " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

    The tags used to organize, track, or control access for this resource.

    " + } + } + }, + "UpdateDomainLayoutRequest":{ + "type":"structure", + "required":[ + "DomainName", + "LayoutDefinitionName" + ], + "members":{ + "DomainName":{ + "shape":"name", + "documentation":"

    The unique name of the domain.

    ", + "location":"uri", + "locationName":"DomainName" + }, + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    ", + "location":"uri", + "locationName":"LayoutDefinitionName" + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + } + } + }, + "UpdateDomainLayoutResponse":{ + "type":"structure", + "members":{ + "LayoutDefinitionName":{ + "shape":"name", + "documentation":"

    The unique name of the layout.

    " + }, + "Description":{ + "shape":"sensitiveText", + "documentation":"

    The description of the layout

    " + }, + "DisplayName":{ + "shape":"displayName", + "documentation":"

    The display name of the layout

    " + }, + "IsDefault":{ + "shape":"boolean", + "documentation":"

    If set to true for a layout, this layout will be used by default to view data. If set to false, then the layout will not be used by default, but it can be used to view data by explicitly selecting it in the console.

    " + }, + "LayoutType":{ + "shape":"LayoutType", + "documentation":"

    The type of layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Layout":{ + "shape":"sensitiveString1To2000000", + "documentation":"

    A customizable layout that can be used to view data under a Customer Profiles domain.

    " + }, + "Version":{ + "shape":"string1To255", + "documentation":"

    The version used to create layout.

    " + }, + "CreatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was created.

    " + }, + "LastUpdatedAt":{ + "shape":"timestamp", + "documentation":"

    The timestamp of when the layout was most recently updated.

    " + }, "Tags":{ "shape":"TagMap", "documentation":"

    The tags used to organize, track, or control access for this resource.

    " @@ -8045,7 +8603,7 @@ }, "AccountNumber":{ "shape":"sensitiveString0To255", - "documentation":"

    An account number that you have given to the customer.

    " + "documentation":"

    An account number that you have assigned to the customer.

    " }, "PartyType":{ "shape":"PartyType", @@ -8145,8 +8703,8 @@ }, "Value":{ "type":"integer", - "max":366, - "min":1 + "max":2147483647, + "min":0 }, "ValueList":{ "type":"list", @@ -8154,6 +8712,26 @@ "max":10, "min":1 }, + "ValueRange":{ + "type":"structure", + "required":[ + "Start", + "End" + ], + "members":{ + "Start":{ + "shape":"ValueRangeStart", + "documentation":"

    The start time of when to include objects. Use positive numbers to indicate that the starting point is in the past, and negative numbers to indicate it is in the future.

    " + }, + "End":{ + "shape":"ValueRangeEnd", + "documentation":"

    The end time of when to include objects. Use positive numbers to indicate that the end point is in the past, and negative numbers to indicate it is in the future.

    " + } + }, + "documentation":"

    A structure letting customers specify a relative time window over which data is included in the Calculated Attribute. Use positive numbers to indicate that the endpoint is in the past, and negative numbers to indicate it is in the future. ValueRange overrides Value.

    " + }, + "ValueRangeEnd":{"type":"integer"}, + "ValueRangeStart":{"type":"integer"}, "Values":{ "type":"list", "member":{"shape":"string1To255"}, @@ -8311,6 +8889,11 @@ }, "optionalBoolean":{"type":"boolean"}, "optionalLong":{"type":"long"}, + "percentageInteger":{ + "type":"integer", + "max":100, + "min":0 + }, "requestValueList":{ "type":"list", "member":{"shape":"string1To255"} @@ -8351,6 +8934,12 @@ "min":1, "sensitive":true }, + "sensitiveString1To2000000":{ + "type":"string", + "max":2000000, + "min":1, + "sensitive":true + }, "sensitiveString1To255":{ "type":"string", "max":255, diff --git a/services/databasemigration/pom.xml b/services/databasemigration/pom.xml index 084755d46836..87ef67f9460d 100644 --- a/services/databasemigration/pom.xml +++ b/services/databasemigration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT databasemigration AWS Java SDK :: Services :: AWS Database Migration Service diff --git a/services/databasemigration/src/main/resources/codegen-resources/customization.config b/services/databasemigration/src/main/resources/codegen-resources/customization.config index 624c262e908f..a2efe702958d 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/customization.config +++ b/services/databasemigration/src/main/resources/codegen-resources/customization.config @@ -16,6 +16,5 @@ "excludedSimpleMethods": [ "describeReplicationTaskAssessmentResults" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/databasemigration/src/main/resources/codegen-resources/service-2.json b/services/databasemigration/src/main/resources/codegen-resources/service-2.json index a534411d4fa1..84dbbfad0f4c 100644 --- a/services/databasemigration/src/main/resources/codegen-resources/service-2.json +++ b/services/databasemigration/src/main/resources/codegen-resources/service-2.json @@ -54,7 +54,7 @@ 
{"shape":"AccessDeniedFault"}, {"shape":"ResourceNotFoundFault"} ], - "documentation":"

    Starts the analysis of up to 20 source databases to recommend target engines for each source database. This is a batch version of StartRecommendations.

    The result of analysis of each source database is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Starts the analysis of up to 20 source databases to recommend target engines for each source database. This is a batch version of StartRecommendations.

    The result of analysis of each source database is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

    " }, "CancelReplicationTaskAssessmentRun":{ "name":"CancelReplicationTaskAssessmentRun", @@ -160,7 +160,7 @@ {"shape":"S3ResourceNotFoundFault"}, {"shape":"ResourceQuotaExceededFault"} ], - "documentation":"

    Creates a Fleet Advisor collector using the specified parameters.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Creates a Fleet Advisor collector using the specified parameters.

    " }, "CreateInstanceProfile":{ "name":"CreateInstanceProfile", @@ -381,7 +381,7 @@ {"shape":"CollectorNotFoundFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

    Deletes the specified Fleet Advisor collector.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Deletes the specified Fleet Advisor collector.

    " }, "DeleteFleetAdvisorDatabases":{ "name":"DeleteFleetAdvisorDatabases", @@ -396,7 +396,7 @@ {"shape":"InvalidOperationFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

    Deletes the specified Fleet Advisor collector databases.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Deletes the specified Fleet Advisor collector databases.

    " }, "DeleteInstanceProfile":{ "name":"DeleteInstanceProfile", @@ -695,7 +695,7 @@ "errors":[ {"shape":"InvalidResourceStateFault"} ], - "documentation":"

    Returns a list of the Fleet Advisor collectors in your account.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Returns a list of the Fleet Advisor collectors in your account.

    " }, "DescribeFleetAdvisorDatabases":{ "name":"DescribeFleetAdvisorDatabases", @@ -708,7 +708,7 @@ "errors":[ {"shape":"InvalidResourceStateFault"} ], - "documentation":"

    Returns a list of Fleet Advisor databases in your account.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Returns a list of Fleet Advisor databases in your account.

    " }, "DescribeFleetAdvisorLsaAnalysis":{ "name":"DescribeFleetAdvisorLsaAnalysis", @@ -721,7 +721,7 @@ "errors":[ {"shape":"InvalidResourceStateFault"} ], - "documentation":"

    Provides descriptions of large-scale assessment (LSA) analyses produced by your Fleet Advisor collectors.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Provides descriptions of large-scale assessment (LSA) analyses produced by your Fleet Advisor collectors.

    " }, "DescribeFleetAdvisorSchemaObjectSummary":{ "name":"DescribeFleetAdvisorSchemaObjectSummary", @@ -734,7 +734,7 @@ "errors":[ {"shape":"InvalidResourceStateFault"} ], - "documentation":"

    Provides descriptions of the schemas discovered by your Fleet Advisor collectors.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Provides descriptions of the schemas discovered by your Fleet Advisor collectors.

    " }, "DescribeFleetAdvisorSchemas":{ "name":"DescribeFleetAdvisorSchemas", @@ -747,7 +747,7 @@ "errors":[ {"shape":"InvalidResourceStateFault"} ], - "documentation":"

    Returns a list of schemas detected by Fleet Advisor Collectors in your account.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Returns a list of schemas detected by Fleet Advisor Collectors in your account.

    " }, "DescribeInstanceProfiles":{ "name":"DescribeInstanceProfiles", @@ -879,7 +879,7 @@ {"shape":"InvalidResourceStateFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

    Returns a paginated list of limitations for recommendations of target Amazon Web Services engines.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Returns a paginated list of limitations for recommendations of target Amazon Web Services engines.

    " }, "DescribeRecommendations":{ "name":"DescribeRecommendations", @@ -893,7 +893,7 @@ {"shape":"InvalidResourceStateFault"}, {"shape":"AccessDeniedFault"} ], - "documentation":"

    Returns a paginated list of target engine recommendations for your source databases.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Returns a paginated list of target engine recommendations for your source databases.

    " }, "DescribeRefreshSchemasStatus":{ "name":"DescribeRefreshSchemasStatus", @@ -1404,7 +1404,7 @@ {"shape":"InvalidResourceStateFault"}, {"shape":"ResourceNotFoundFault"} ], - "documentation":"

    Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Runs large-scale assessment (LSA) analysis on every Fleet Advisor collector in your account.

    " }, "StartDataMigration":{ "name":"StartDataMigration", @@ -1555,7 +1555,7 @@ {"shape":"AccessDeniedFault"}, {"shape":"ResourceNotFoundFault"} ], - "documentation":"

    Starts the analysis of your source database to provide recommendations of target engines.

    You can create recommendations for multiple source databases using BatchStartRecommendations.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Starts the analysis of your source database to provide recommendations of target engines.

    You can create recommendations for multiple source databases using BatchStartRecommendations.

    " }, "StartReplication":{ "name":"StartReplication", @@ -1755,8 +1755,7 @@ }, "AddTagsToResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    " }, "ApplyPendingMaintenanceActionMessage":{ @@ -2248,6 +2247,10 @@ "shape":"String", "documentation":"

    The type of database engine for the data provider. Valid values include \"aurora\", \"aurora-postgresql\", \"mysql\", \"oracle\", \"postgres\", \"sqlserver\", redshift, mariadb, mongodb, db2, db2-zos and docdb. A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

    " }, + "Virtual":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates whether the data provider is virtual.

    " + }, "Settings":{ "shape":"DataProviderSettings", "documentation":"

    The settings in JSON format for a data provider.

    " @@ -2788,7 +2791,7 @@ }, "ReplicationSubnetGroupDescription":{ "shape":"String", - "documentation":"

    The description for the subnet group.

    " + "documentation":"

    The description for the subnet group.

    Constraints: This parameter must not contain non-printable control characters.

    " }, "SubnetIds":{ "shape":"SubnetIdentifierList", @@ -3059,6 +3062,10 @@ "shape":"String", "documentation":"

    The type of database engine for the data provider. Valid values include \"aurora\", \"aurora-postgresql\", \"mysql\", \"oracle\", \"postgres\", \"sqlserver\", redshift, mariadb, mongodb, db2, db2-zos and docdb. A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

    " }, + "Virtual":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates whether the data provider is virtual.

    " + }, "Settings":{ "shape":"DataProviderSettings", "documentation":"

    The settings in JSON format for a data provider.

    " @@ -3527,8 +3534,7 @@ }, "DeleteReplicationSubnetGroupResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    " }, "DeleteReplicationTaskAssessmentRunMessage":{ @@ -3575,8 +3581,7 @@ }, "DescribeAccountAttributesMessage":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    " }, "DescribeAccountAttributesResponse":{ @@ -3712,7 +3717,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The name or Amazon Resource Name (ARN) for the schema conversion project to describe.

    " } } @@ -3952,7 +3957,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    Filters applied to event subscriptions.

    Valid filter names: event-subscription-arn | event-subscription-id

    " + "documentation":"

    Filters applied to event subscriptions.

    Valid filter names: event-subscription-arn | event-subscription-id

    " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4040,7 +4045,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The name or Amazon Resource Name (ARN) for the migration project.

    " }, "Filters":{ @@ -4165,7 +4170,7 @@ }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

    Sets the maximum number of records returned in the response.

    " + "documentation":"

    End of support notice: On May 20, 2026, Amazon Web Services will end support for Amazon Web Services DMS Fleet Advisor;. After May 20, 2026, you will no longer be able to access the Amazon Web Services DMS Fleet Advisor; console or Amazon Web Services DMS Fleet Advisor; resources. For more information, see Amazon Web Services DMS Fleet Advisor end of support.

    Sets the maximum number of records returned in the response.

    " }, "NextToken":{ "shape":"String", @@ -4251,7 +4256,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The name or Amazon Resource Name (ARN) of the migration project.

    " }, "Filters":{ @@ -4286,7 +4291,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "Filters":{ @@ -4321,7 +4326,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "Filters":{ @@ -4356,7 +4361,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "Filters":{ @@ -4391,7 +4396,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "Filters":{ @@ -4520,7 +4525,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    Filters applied to the limitations described in the form of key-value pairs.

    " + "documentation":"

    Filters applied to the limitations described in the form of key-value pairs.

    Valid filter names: database-id | engine-name

    " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4550,7 +4555,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    Filters applied to the target engine recommendations described in the form of key-value pairs.

    " + "documentation":"

    Filters applied to the target engine recommendations described in the form of key-value pairs.

    Valid filter names: database-id | engine-name

    " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4909,7 +4914,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

    Filters applied to the replications.

    " + "documentation":"

    Filters applied to the replications.

    Valid filter names: replication-config-arn | replication-config-id

    " }, "MaxRecords":{ "shape":"IntegerOptional", @@ -5548,7 +5553,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -5838,6 +5843,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information about an IBM DB2 LUW data provider.

    " @@ -5864,6 +5877,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information about an IBM DB2 for z/OS data provider.

    " @@ -6360,6 +6381,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines a MariaDB data provider.

    " @@ -6475,6 +6504,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines a Microsoft SQL Server data provider.

    " @@ -6525,6 +6562,10 @@ }, "documentation":"

    Provides information that defines a migration project.

    " }, + "MigrationProjectIdentifier":{ + "type":"string", + "max":255 + }, "MigrationProjectList":{ "type":"list", "member":{"shape":"MigrationProject"} @@ -6545,7 +6586,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "ConversionConfiguration":{ @@ -6634,6 +6675,10 @@ "shape":"String", "documentation":"

    The type of database engine for the data provider. Valid values include \"aurora\", \"aurora-postgresql\", \"mysql\", \"oracle\", \"postgres\", \"sqlserver\", redshift, mariadb, mongodb, db2, db2-zos and docdb. A value of \"aurora\" represents Amazon Aurora MySQL-Compatible Edition.

    " }, + "Virtual":{ + "shape":"BooleanOptional", + "documentation":"

    Indicates whether the data provider is virtual.

    " + }, "ExactSettings":{ "shape":"BooleanOptional", "documentation":"

    If this attribute is Y, the current call to ModifyDataProvider replaces all existing data provider settings with the exact settings that you specify in this call. If this attribute is N, the current call to ModifyDataProvider does two things:

    • It replaces any data provider settings that already exist with new values, for settings with the same names.

    • It creates new data provider settings that you specify in the call, for settings with different names.

    " @@ -7280,6 +7325,13 @@ }, "documentation":"

    " }, + "MySQLAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "iam" + ] + }, "MySQLSettings":{ "type":"structure", "members":{ @@ -7342,6 +7394,14 @@ "ExecuteTimeout":{ "shape":"IntegerOptional", "documentation":"

    Sets the client statement timeout (in seconds) for a MySQL source endpoint.

    " + }, + "ServiceAccessRoleArn":{ + "shape":"String", + "documentation":"

    The IAM role you can use to authenticate when connecting to your endpoint. Ensure to include iam:PassRole and rds-db:connect actions in permission policy.

    " + }, + "AuthenticationMethod":{ + "shape":"MySQLAuthenticationMethod", + "documentation":"

    This attribute allows you to specify the authentication method as \"iam auth\".

    " } }, "documentation":"

    Provides information that defines a MySQL endpoint.

    " @@ -7364,6 +7424,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines a MySQL data provider.

    " @@ -7462,6 +7530,14 @@ "SecretsManagerSecurityDbEncryptionAccessRoleArn":{ "shape":"String", "documentation":"

    The ARN of the IAM role that provides access to the secret in Secrets Manager that contains the TDE password.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines an Oracle data provider.

    " @@ -7754,6 +7830,13 @@ "pglogical" ] }, + "PostgreSQLAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "iam" + ] + }, "PostgreSQLSettings":{ "type":"structure", "members":{ @@ -7856,6 +7939,14 @@ "DisableUnicodeSourceFilter":{ "shape":"BooleanOptional", "documentation":"

    Disables the Unicode source filter with PostgreSQL, for values passed into the Selection rule filter on Source Endpoint column values. By default DMS performs source filter comparisons using a Unicode string which can cause look ups to ignore the indexes in the text columns and slow down migrations.

    Unicode support should only be disabled when using a selection rule filter is on a text column in the Source database that is indexed.

    " + }, + "ServiceAccessRoleArn":{ + "shape":"String", + "documentation":"

    The IAM role ARN you can use to authenticate the connection to your endpoint. Ensure to include iam:PassRole and rds-db:connect actions in permission policy.

    " + }, + "AuthenticationMethod":{ + "shape":"PostgreSQLAuthenticationMethod", + "documentation":"

    This attribute allows you to specify the authentication method as \"iam auth\".

    " } }, "documentation":"

    Provides information that defines a PostgreSQL endpoint.

    " @@ -7882,6 +7973,14 @@ "CertificateArn":{ "shape":"String", "documentation":"

    The Amazon Resource Name (ARN) of the certificate used for SSL connection.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines a PostgreSQL data provider.

    " @@ -8212,6 +8311,14 @@ "DatabaseName":{ "shape":"String", "documentation":"

    The database name on the Amazon Redshift data provider.

    " + }, + "S3Path":{ + "shape":"String", + "documentation":"

    The path for the Amazon S3 bucket that the application uses for accessing the user-defined schema.

    " + }, + "S3AccessRoleArn":{ + "shape":"String", + "documentation":"

    The ARN for the role the application uses to access its Amazon S3 bucket.

    " } }, "documentation":"

    Provides information that defines an Amazon Redshift data provider.

    " @@ -8504,8 +8611,7 @@ }, "RemoveTagsFromResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    " }, "Replication":{ @@ -9486,7 +9592,7 @@ }, "DatePartitionTimezone":{ "shape":"String", - "documentation":"

    When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in the following example.

    s3-settings='{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"Asia/Seoul\", \"BucketName\": \"dms-nattarat-test\"}'

    " + "documentation":"

    When creating an S3 target endpoint, set DatePartitionTimezone to convert the current UTC time into a specified time zone. The conversion occurs when a date partition folder is created and a CDC filename is generated. The time zone format is Area/Location. Use this parameter when DatePartitionedEnabled is set to true, as shown in the following example:

    s3-settings='{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"Asia/Seoul\", \"BucketName\": \"dms-nattarat-test\"}'

    " }, "AddTrailingPaddingCharacter":{ "shape":"BooleanOptional", @@ -9741,7 +9847,7 @@ "required":["MigrationProjectIdentifier"], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " } } @@ -9763,7 +9869,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -9789,7 +9895,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -9816,7 +9922,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -9850,7 +9956,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -9881,7 +9987,7 @@ ], "members":{ "MigrationProjectIdentifier":{ - "shape":"String", + "shape":"MigrationProjectIdentifier", "documentation":"

    The migration project name or Amazon Resource Name (ARN).

    " }, "SelectionRules":{ @@ -10094,7 +10200,7 @@ }, "StartReplicationTaskType":{ "shape":"StartReplicationTaskTypeValue", - "documentation":"

    The type of replication task to start.

    When the migration type is full-load or full-load-and-cdc, the only valid value for the first run of the task is start-replication. This option will start the migration.

    You can also use ReloadTables to reload specific tables that failed during migration instead of restarting the task.

    The resume-processing option isn't applicable for a full-load task, because you can't resume partially loaded tables during the full load phase.

    For a full-load-and-cdc task, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again, and start capturing source changes, use reload-target. Otherwise use resume-processing, to replicate the changes from the last stop position.

    " + "documentation":"

    The type of replication task to start.

    start-replication is the only valid action that can be used for the first time a task with the migration type of full-load, full-load-and-cdc or cdc is run. Any other action used for the first time on a given task, such as resume-processing and reload-target, will result in data errors.

    You can also use ReloadTables to reload specific tables that failed during migration instead of restarting the task.

    For a full-load task, the resume-processing option will reload any tables that were partially loaded or not yet loaded during the full load phase.

    For a full-load-and-cdc task, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again, and start capturing source changes, use reload-target. Otherwise use resume-processing, to replicate the changes from the last stop position.

    For a cdc only task, to start from a specific position, you must use start-replication and also specify the start position. Check the source endpoint DMS documentation for any limitations. For example, not all sources support starting from a time.

    resume-processing is only available for previously executed tasks.

    " }, "CdcStartTime":{ "shape":"TStamp", @@ -10413,6 +10519,26 @@ "ValidationStateDetails":{ "shape":"String", "documentation":"

    Additional details about the state of validation.

    " + }, + "ResyncState":{ + "shape":"String", + "documentation":"

    Records the current state of table resynchronization in the migration task.

    This parameter can have the following values:

    • Not enabled – Resync is not enabled for the table in the migration task.

    • Pending – The tables are waiting for resync.

    • In progress – Resync in progress for some records in the table.

    • No primary key – The table could not be resynced because it has no primary key.

    • Last resync at: date/time – Resync session is finished at time. Time provided in UTC format.

    " + }, + "ResyncRowsAttempted":{ + "shape":"LongOptional", + "documentation":"

    Records the total number of mismatched data rows where the system attempted to apply fixes in the target database.

    " + }, + "ResyncRowsSucceeded":{ + "shape":"LongOptional", + "documentation":"

    Records the total number of mismatched data rows where fixes were successfully applied in the target database.

    " + }, + "ResyncRowsFailed":{ + "shape":"LongOptional", + "documentation":"

    Records the total number of mismatched data rows where fix attempts failed in the target database.

    " + }, + "ResyncProgress":{ + "shape":"DoubleOptional", + "documentation":"

    Calculates the percentage of failed validations that were successfully resynced to the system.

    " } }, "documentation":"

    Provides a collection of table statistics in response to a request by the DescribeTableStatistics operation.

    " diff --git a/services/databrew/pom.xml b/services/databrew/pom.xml index 6ff930064d79..5cdd6139e1a4 100644 --- a/services/databrew/pom.xml +++ b/services/databrew/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT databrew AWS Java SDK :: Services :: Data Brew diff --git a/services/databrew/src/main/resources/codegen-resources/customization.config b/services/databrew/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/databrew/src/main/resources/codegen-resources/customization.config +++ b/services/databrew/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/dataexchange/pom.xml b/services/dataexchange/pom.xml index 12ccf0a2d992..f46b8b66e508 100644 --- a/services/dataexchange/pom.xml +++ b/services/dataexchange/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dataexchange AWS Java SDK :: Services :: DataExchange diff --git a/services/dataexchange/src/main/resources/codegen-resources/customization.config b/services/dataexchange/src/main/resources/codegen-resources/customization.config index 9a2be0384a26..69190d0102fb 100644 --- a/services/dataexchange/src/main/resources/codegen-resources/customization.config +++ b/services/dataexchange/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ } }, "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/dataexchange/src/main/resources/codegen-resources/service-2.json b/services/dataexchange/src/main/resources/codegen-resources/service-2.json index ead739dc3812..4abd274e8a99 100644 --- 
a/services/dataexchange/src/main/resources/codegen-resources/service-2.json +++ b/services/dataexchange/src/main/resources/codegen-resources/service-2.json @@ -990,7 +990,7 @@ "required":["JobId"], "members":{ "JobId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a job.

    ", "location":"uri", "locationName":"JobId" @@ -1245,6 +1245,10 @@ "Event":{ "shape":"Event", "documentation":"

    What occurs to start an action.

    " + }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    Key-value pairs that you can associate with the event action.

    " } } }, @@ -1271,6 +1275,10 @@ "shape":"Id", "documentation":"

    The unique identifier for the event action.

    " }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    The tags for the event action.

    " + }, "UpdatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time that the event action was last updated, in ISO 8601 format.

    " @@ -1340,7 +1348,7 @@ "documentation":"

    An optional comment about the revision.

    " }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -1649,19 +1657,19 @@ ], "members":{ "AssetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for an asset.

    ", "location":"uri", "locationName":"AssetId" }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -1685,7 +1693,7 @@ "required":["DataSetId"], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -1712,13 +1720,13 @@ ], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -1986,19 +1994,19 @@ ], "members":{ "AssetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for an asset.

    ", "location":"uri", "locationName":"AssetId" }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -2145,7 +2153,7 @@ "required":["DataSetId"], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -2236,6 +2244,10 @@ "shape":"Id", "documentation":"

    The unique identifier for the event action.

    " }, + "Tags":{ + "shape":"MapOf__string", + "documentation":"

    The tags for the event action.

    " + }, "UpdatedAt":{ "shape":"Timestamp", "documentation":"

    The date and time that the event action was last updated, in ISO 8601 format.

    " @@ -2247,7 +2259,7 @@ "required":["JobId"], "members":{ "JobId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a job.

    ", "location":"uri", "locationName":"JobId" @@ -2379,13 +2391,13 @@ ], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -2452,7 +2464,10 @@ "NONE" ] }, - "Id":{"type":"string"}, + "Id":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{30,40}" + }, "ImportAssetFromApiGatewayApiRequestDetails":{ "type":"structure", "required":[ @@ -3131,7 +3146,7 @@ "required":["DataSetId"], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -3416,7 +3431,7 @@ ], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -3434,7 +3449,7 @@ "locationName":"nextToken" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -3879,13 +3894,13 @@ ], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -4184,7 +4199,7 @@ "documentation":"

    Free-form text field for providers to add information about their notifications.

    " }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    Affected data set of the notification.

    ", "location":"uri", "locationName":"DataSetId" @@ -4244,7 +4259,7 @@ "required":["JobId"], "members":{ "JobId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a job.

    ", "location":"uri", "locationName":"JobId" @@ -4389,13 +4404,13 @@ ], "members":{ "AssetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for an asset.

    ", "location":"uri", "locationName":"AssetId" }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -4405,7 +4420,7 @@ "documentation":"

    The name of the asset. When importing from Amazon S3, the Amazon S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target Amazon S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name. When importing from AWS Lake Formation, the static values of \"Database(s) included in the LF-tag policy\" or \"Table(s) included in LF-tag policy\" are used as the name.

    " }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" @@ -4462,7 +4477,7 @@ "required":["DataSetId"], "members":{ "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -4579,7 +4594,7 @@ "documentation":"

    An optional comment about the revision.

    " }, "DataSetId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a data set.

    ", "location":"uri", "locationName":"DataSetId" @@ -4589,7 +4604,7 @@ "documentation":"

    Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

    " }, "RevisionId":{ - "shape":"__string", + "shape":"Id", "documentation":"

    The unique identifier for a revision.

    ", "location":"uri", "locationName":"RevisionId" diff --git a/services/datapipeline/pom.xml b/services/datapipeline/pom.xml index 7492959ea321..04a493b97b53 100644 --- a/services/datapipeline/pom.xml +++ b/services/datapipeline/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT datapipeline AWS Java SDK :: Services :: AWS Data Pipeline diff --git a/services/datapipeline/src/main/resources/codegen-resources/customization.config b/services/datapipeline/src/main/resources/codegen-resources/customization.config index a1bed121ef72..291756f11dd3 100644 --- a/services/datapipeline/src/main/resources/codegen-resources/customization.config +++ b/services/datapipeline/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listPipelines" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/datasync/pom.xml b/services/datasync/pom.xml index 9bc2215a1c6f..b7248b0a6e0a 100644 --- a/services/datasync/pom.xml +++ b/services/datasync/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT datasync AWS Java SDK :: Services :: DataSync diff --git a/services/datasync/src/main/resources/codegen-resources/customization.config b/services/datasync/src/main/resources/codegen-resources/customization.config index 4397048d9bf2..fadc149e092b 100644 --- a/services/datasync/src/main/resources/codegen-resources/customization.config +++ b/services/datasync/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "listTasks" ], "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/datasync/src/main/resources/codegen-resources/paginators-1.json b/services/datasync/src/main/resources/codegen-resources/paginators-1.json 
index a6e759743212..53de34f21c8e 100644 --- a/services/datasync/src/main/resources/codegen-resources/paginators-1.json +++ b/services/datasync/src/main/resources/codegen-resources/paginators-1.json @@ -1,40 +1,17 @@ { "pagination": { - "DescribeStorageSystemResourceMetrics": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults", - "result_key": "Metrics" - }, - "DescribeStorageSystemResources": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults" - }, "ListAgents": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Agents" }, - "ListDiscoveryJobs": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults", - "result_key": "DiscoveryJobs" - }, "ListLocations": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Locations" }, - "ListStorageSystems": { - "input_token": "NextToken", - "output_token": "NextToken", - "limit_key": "MaxResults", - "result_key": "StorageSystems" - }, "ListTagsForResource": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/datasync/src/main/resources/codegen-resources/service-2.json b/services/datasync/src/main/resources/codegen-resources/service-2.json index 33f995fe6b24..13f6ff5f8641 100644 --- a/services/datasync/src/main/resources/codegen-resources/service-2.json +++ b/services/datasync/src/main/resources/codegen-resources/service-2.json @@ -16,21 +16,6 @@ "auth":["aws.auth#sigv4"] }, "operations":{ - "AddStorageSystem":{ - "name":"AddStorageSystem", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"AddStorageSystemRequest"}, - "output":{"shape":"AddStorageSystemResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Creates an Amazon Web Services resource for an on-premises storage system that you want DataSync Discovery to collect information about.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "CancelTaskExecution":{ "name":"CancelTaskExecution", "http":{ @@ -71,7 +56,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use this location as a transfer source or destination.

    Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.

    " + "documentation":"

    Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use this location as a transfer source or destination. You can make transfers with or without a DataSync agent that connects to your container.

    Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types.

    " }, "CreateLocationEfs":{ "name":"CreateLocationEfs", @@ -183,7 +168,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

    Creates a transfer location for an object storage system. DataSync can use this location as a source or destination for transferring data.

    Before you begin, make sure that you understand the prerequisites for DataSync to work with object storage systems.

    " + "documentation":"

    Creates a transfer location for an object storage system. DataSync can use this location as a source or destination for transferring data. You can make transfers with or without a DataSync agent.

    Before you begin, make sure that you understand the prerequisites for DataSync to work with object storage systems.

    " }, "CreateLocationS3":{ "name":"CreateLocationS3", @@ -283,21 +268,6 @@ ], "documentation":"

    Returns information about an DataSync agent, such as its name, service endpoint type, and status.

    " }, - "DescribeDiscoveryJob":{ - "name":"DescribeDiscoveryJob", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DescribeDiscoveryJobRequest"}, - "output":{"shape":"DescribeDiscoveryJobResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Returns information about a DataSync discovery job.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "DescribeLocationAzureBlob":{ "name":"DescribeLocationAzureBlob", "http":{ @@ -452,51 +422,6 @@ ], "documentation":"

    Provides details about how an DataSync transfer location for a Server Message Block (SMB) file server is configured.

    " }, - "DescribeStorageSystem":{ - "name":"DescribeStorageSystem", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DescribeStorageSystemRequest"}, - "output":{"shape":"DescribeStorageSystemResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Returns information about an on-premises storage system that you're using with DataSync Discovery.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, - "DescribeStorageSystemResourceMetrics":{ - "name":"DescribeStorageSystemResourceMetrics", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DescribeStorageSystemResourceMetricsRequest"}, - "output":{"shape":"DescribeStorageSystemResourceMetricsResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Returns information, including performance data and capacity usage, which DataSync Discovery collects about a specific resource in your on-premises storage system.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, - "DescribeStorageSystemResources":{ - "name":"DescribeStorageSystemResources", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"DescribeStorageSystemResourcesRequest"}, - "output":{"shape":"DescribeStorageSystemResourcesResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Returns information that DataSync Discovery collects about resources in your on-premises storage system.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "DescribeTask":{ "name":"DescribeTask", "http":{ @@ -525,21 +450,6 @@ ], "documentation":"

    Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing data transfer or check the results of the transfer.

    Some DescribeTaskExecution response elements are only relevant to a specific task mode. For information, see Understanding task mode differences and Understanding data transfer performance counters.

    " }, - "GenerateRecommendations":{ - "name":"GenerateRecommendations", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"GenerateRecommendationsRequest"}, - "output":{"shape":"GenerateRecommendationsResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Creates recommendations about where to migrate your data to in Amazon Web Services. Recommendations are generated based on information that DataSync Discovery collects about your on-premises storage system's resources. For more information, see Recommendations provided by DataSync Discovery.

    Once generated, you can view your recommendations by using the DescribeStorageSystemResources operation.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "ListAgents":{ "name":"ListAgents", "http":{ @@ -554,21 +464,6 @@ ], "documentation":"

    Returns a list of DataSync agents that belong to an Amazon Web Services account in the Amazon Web Services Region specified in the request.

    With pagination, you can reduce the number of agents returned in a response. If you get a truncated list of agents in a response, the response contains a marker that you can specify in your next request to fetch the next page of agents.

    ListAgents is eventually consistent. This means the result of running the operation might not reflect that you just created or deleted an agent. For example, if you create an agent with CreateAgent and then immediately run ListAgents, that agent might not show up in the list right away. In situations like this, you can always confirm whether an agent has been created (or deleted) by using DescribeAgent.

    " }, - "ListDiscoveryJobs":{ - "name":"ListDiscoveryJobs", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListDiscoveryJobsRequest"}, - "output":{"shape":"ListDiscoveryJobsResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Provides a list of the existing discovery jobs in the Amazon Web Services Region and Amazon Web Services account where you're using DataSync Discovery.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "ListLocations":{ "name":"ListLocations", "http":{ @@ -583,21 +478,6 @@ ], "documentation":"

    Returns a list of source and destination locations.

    If you have more locations than are returned in a response (that is, the response returns only a truncated list of your agents), the response contains a token that you can specify in your next request to fetch the next page of locations.

    " }, - "ListStorageSystems":{ - "name":"ListStorageSystems", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"ListStorageSystemsRequest"}, - "output":{"shape":"ListStorageSystemsResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Lists the on-premises storage systems that you're using with DataSync Discovery.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -640,36 +520,6 @@ ], "documentation":"

    Returns a list of the DataSync tasks you created.

    " }, - "RemoveStorageSystem":{ - "name":"RemoveStorageSystem", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"RemoveStorageSystemRequest"}, - "output":{"shape":"RemoveStorageSystemResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Permanently removes a storage system resource from DataSync Discovery, including the associated discovery jobs, collected data, and recommendations.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, - "StartDiscoveryJob":{ - "name":"StartDiscoveryJob", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"StartDiscoveryJobRequest"}, - "output":{"shape":"StartDiscoveryJobResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Runs a DataSync discovery job on your on-premises storage system. If you haven't added the storage system to DataSync Discovery yet, do this first by using the AddStorageSystem operation.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "StartTaskExecution":{ "name":"StartTaskExecution", "http":{ @@ -684,21 +534,6 @@ ], "documentation":"

    Starts an DataSync transfer task. For each task, you can only run one task execution at a time.

    There are several steps to a task execution. For more information, see Task execution statuses.

    If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin.

    " }, - "StopDiscoveryJob":{ - "name":"StopDiscoveryJob", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"StopDiscoveryJobRequest"}, - "output":{"shape":"StopDiscoveryJobResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Stops a running DataSync discovery job.

    You can stop a discovery job anytime. A job that's stopped before it's scheduled to end likely will provide you some information about your on-premises storage system resources. To get recommendations for a stopped job, you must use the GenerateRecommendations operation.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "TagResource":{ "name":"TagResource", "http":{ @@ -741,21 +576,6 @@ ], "documentation":"

    Updates the name of an DataSync agent.

    " }, - "UpdateDiscoveryJob":{ - "name":"UpdateDiscoveryJob", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateDiscoveryJobRequest"}, - "output":{"shape":"UpdateDiscoveryJobResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Edits a DataSync discovery job configuration.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "UpdateLocationAzureBlob":{ "name":"UpdateLocationAzureBlob", "http":{ @@ -910,21 +730,6 @@ ], "documentation":"

    Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync.

    For more information, see Configuring DataSync transfers with an SMB file server.

    " }, - "UpdateStorageSystem":{ - "name":"UpdateStorageSystem", - "http":{ - "method":"POST", - "requestUri":"/" - }, - "input":{"shape":"UpdateStorageSystemRequest"}, - "output":{"shape":"UpdateStorageSystemResponse"}, - "errors":[ - {"shape":"InvalidRequestException"}, - {"shape":"InternalException"} - ], - "documentation":"

    Modifies some configurations of an on-premises storage system resource that you're using with DataSync Discovery.

    ", - "endpoint":{"hostPrefix":"discovery-"} - }, "UpdateTask":{ "name":"UpdateTask", "http":{ @@ -960,61 +765,6 @@ "max":29, "pattern":"[A-Z0-9]{5}(-[A-Z0-9]{5}){4}" }, - "AddStorageSystemRequest":{ - "type":"structure", - "required":[ - "ServerConfiguration", - "SystemType", - "AgentArns", - "ClientToken", - "Credentials" - ], - "members":{ - "ServerConfiguration":{ - "shape":"DiscoveryServerConfiguration", - "documentation":"

    Specifies the server name and network port required to connect with the management interface of your on-premises storage system.

    " - }, - "SystemType":{ - "shape":"DiscoverySystemType", - "documentation":"

    Specifies the type of on-premises storage system that you want DataSync Discovery to collect information about.

    DataSync Discovery currently supports NetApp Fabric-Attached Storage (FAS) and All Flash FAS (AFF) systems running ONTAP 9.7 or later.

    " - }, - "AgentArns":{ - "shape":"DiscoveryAgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads from your on-premises storage system's management interface. You can only specify one ARN.

    " - }, - "CloudWatchLogGroupArn":{ - "shape":"LogGroupArn", - "documentation":"

    Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events.

    " - }, - "Tags":{ - "shape":"InputTagList", - "documentation":"

    Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your on-premises storage system.

    " - }, - "Name":{ - "shape":"Name", - "documentation":"

    Specifies a familiar name for your on-premises storage system.

    " - }, - "ClientToken":{ - "shape":"PtolemyUUID", - "documentation":"

    Specifies a client token to make sure requests with this API operation are idempotent. If you don't specify a client token, DataSync generates one for you automatically.

    ", - "idempotencyToken":true - }, - "Credentials":{ - "shape":"Credentials", - "documentation":"

    Specifies the user name and password for accessing your on-premises storage system's management interface.

    " - } - } - }, - "AddStorageSystemResponse":{ - "type":"structure", - "required":["StorageSystemArn"], - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    The ARN of the on-premises storage system that you can use with DataSync Discovery.

    " - } - } - }, "AgentArn":{ "type":"string", "max":128, @@ -1082,7 +832,10 @@ }, "AzureBlobAuthenticationType":{ "type":"string", - "enum":["SAS"] + "enum":[ + "SAS", + "NONE" + ] }, "AzureBlobContainerUrl":{ "type":"string", @@ -1133,35 +886,21 @@ }, "CancelTaskExecutionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, - "Capacity":{ + "CmkSecretConfig":{ "type":"structure", "members":{ - "Used":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of space that's being used in a storage system resource.

    " - }, - "Provisioned":{ - "shape":"NonNegativeLong", - "documentation":"

    The total amount of space available in a storage system resource.

    " + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for the DataSync-managed Secrets Manager secret that is used to access a specific storage location. This property is generated by DataSync and is read-only. DataSync encrypts this secret with the KMS key that you specify for KmsKeyArn.

    " }, - "LogicalUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of space that's being used in a storage system resource without accounting for compression or deduplication.

    " - }, - "ClusterCloudStorageUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).

    " + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    Specifies the ARN for the customer-managed KMS key that DataSync uses to encrypt the DataSync-managed secret stored for SecretArn. DataSync provides this key to Secrets Manager.

    " } }, - "documentation":"

    The storage capacity of an on-premises storage system resource (for example, a volume).

    " - }, - "CollectionDurationMinutes":{ - "type":"integer", - "max":44640, - "min":60 + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or secret key that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

    " }, "CreateAgentRequest":{ "type":"structure", @@ -1208,8 +947,7 @@ "type":"structure", "required":[ "ContainerUrl", - "AuthenticationType", - "AgentArns" + "AuthenticationType" ], "members":{ "ContainerUrl":{ @@ -1222,7 +960,7 @@ }, "SasConfiguration":{ "shape":"AzureBlobSasConfiguration", - "documentation":"

    Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

    " + "documentation":"

    Specifies the SAS configuration that allows DataSync to access your Azure Blob Storage.

    If you provide an authentication token using SasConfiguration, but do not provide secret configuration details using CmkSecretConfig or CustomSecretConfig, then DataSync stores the token using your Amazon Web Services account's secrets manager secret.

    " }, "BlobType":{ "shape":"AzureBlobType", @@ -1238,11 +976,19 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    Make sure you configure this parameter correctly when you first create your storage location. You cannot add or remove agents from a storage location after you create it.

    " }, "Tags":{ "shape":"InputTagList", "documentation":"

    Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, which includes the authentication token that DataSync uses to access a specific AzureBlob storage location, with a customer-managed KMS key.

    When you include this parameter as part of a CreateLocationAzureBlob request, you provide only the KMS key ARN. DataSync uses this KMS key together with the authentication token you specify for SasConfiguration to create a DataSync-managed secret to store the location access credentials.

    Make sure DataSync has permission to access the KMS key that you specify.

    You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where the authentication token for an AzureBlob storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

    " } } }, @@ -1506,7 +1252,7 @@ }, "KerberosKeytab":{ "shape":"KerberosKeytabFile", - "documentation":"

    The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.

    If KERBEROS is specified for AuthenticationType, this parameter is required.

    " + "documentation":"

    The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address.

    If KERBEROS is specified for AuthenticationType, this parameter is required.

    " }, "KerberosKrb5Conf":{ "shape":"KerberosKrb5ConfFile", @@ -1576,8 +1322,7 @@ "type":"structure", "required":[ "ServerHostname", - "BucketName", - "AgentArns" + "BucketName" ], "members":{ "ServerHostname":{ @@ -1610,7 +1355,7 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    Make sure you configure this parameter correctly when you first create your storage location. You cannot add or remove agents from a storage location after you create it.

    " }, "Tags":{ "shape":"InputTagList", @@ -1619,6 +1364,14 @@ "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single .pem file with a full certificate chain (for example, file:///home/user/.ssh/object_storage_certificates.pem).

    The certificate chain might include:

    • The object storage system's certificate

    • All intermediate certificates (if there are any)

    • The root certificate of the signing CA

    You can concatenate your certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). The following example cat command creates an object_storage_certificates.pem file that includes three certificates:

    cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem

    To use this parameter, configure ServerProtocol to HTTPS.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, which includes the SecretKey that DataSync uses to access a specific object storage location, with a customer-managed KMS key.

    When you include this parameter as part of a CreateLocationObjectStorage request, you provide only the KMS key ARN. DataSync uses this KMS key together with the value you specify for the SecretKey parameter to create a DataSync-managed secret to store the location access credentials.

    Make sure DataSync has permission to access the KMS key that you specify.

    You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where the secret key for a specific object storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

    " } }, "documentation":"

    CreateLocationObjectStorageRequest

    " @@ -1728,7 +1481,7 @@ }, "KerberosKeytab":{ "shape":"KerberosKeytabFile", - "documentation":"

    Specifies your Kerberos key table (keytab) file, which includes mappings between your Kerberos principal and encryption keys.

    The file must be base64 encoded. If you're using the CLI, the encoding is done for you.

    To avoid task execution errors, make sure that the Kerberos principal that you use to create the keytab file matches exactly what you specify for KerberosPrincipal.

    " + "documentation":"

    Specifies your Kerberos key table (keytab) file, which includes mappings between your Kerberos principal and encryption keys.

    To avoid task execution errors, make sure that the Kerberos principal that you use to create the keytab file matches exactly what you specify for KerberosPrincipal.

    " }, "KerberosKrb5Conf":{ "shape":"KerberosKrb5ConfFile", @@ -1800,7 +1553,7 @@ }, "TaskMode":{ "shape":"TaskMode", - "documentation":"

    Specifies one of the following task modes for your data transfer:

    • ENHANCED - Transfer virtually unlimited numbers of objects with higher performance than Basic mode. Enhanced mode tasks optimize the data transfer process by listing, preparing, transferring, and verifying data in parallel. Enhanced mode is currently available for transfers between Amazon S3 locations.

      To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission.

    • BASIC (default) - Transfer files or objects between Amazon Web Services storage and all other supported DataSync locations. Basic mode tasks are subject to quotas on the number of files, objects, and directories in a dataset. Basic mode sequentially prepares, transfers, and verifies data, making it slower than Enhanced mode for most workloads.

    For more information, see Understanding task mode differences.

    " + "documentation":"

    Specifies one of the following task modes for your data transfer:

    • ENHANCED - Transfer virtually unlimited numbers of objects with higher performance than Basic mode. Enhanced mode tasks optimize the data transfer process by listing, preparing, transferring, and verifying data in parallel. Enhanced mode is currently available for transfers between Amazon S3 locations, transfers between Azure Blob and Amazon S3 without an agent, and transfers between other clouds and Amazon S3 without an agent.

      To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission.

    • BASIC (default) - Transfer files or objects between Amazon Web Services storage and all other supported DataSync locations. Basic mode tasks are subject to quotas on the number of files, objects, and directories in a dataset. Basic mode sequentially prepares, transfers, and verifies data, making it slower than Enhanced mode for most workloads.

    For more information, see Understanding task mode differences.

    " } }, "documentation":"

    CreateTaskRequest

    " @@ -1815,23 +1568,19 @@ }, "documentation":"

    CreateTaskResponse

    " }, - "Credentials":{ + "CustomSecretConfig":{ "type":"structure", - "required":[ - "Username", - "Password" - ], "members":{ - "Username":{ - "shape":"PtolemyUsername", - "documentation":"

    Specifies the user name for your storage system's management interface.

    " + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for an Secrets Manager secret.

    " }, - "Password":{ - "shape":"PtolemyPassword", - "documentation":"

    Specifies the password for your storage system's management interface.

    " + "SecretAccessRoleArn":{ + "shape":"IamRoleArnOrEmptyString", + "documentation":"

    Specifies the ARN for the Identity and Access Management role that DataSync uses to access the secret specified for SecretArn.

    " } }, - "documentation":"

    The credentials that provide DataSync Discovery read access to your on-premises storage system's management interface.

    DataSync Discovery stores these credentials in Secrets Manager. For more information, see Accessing your on-premises storage system.

    " + "documentation":"

    Specifies configuration information for a customer-managed Secrets Manager secret where a storage location authentication token or secret key is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

    You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

    " }, "DeleteAgentRequest":{ "type":"structure", @@ -1846,8 +1595,7 @@ }, "DeleteAgentResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLocationRequest":{ "type":"structure", @@ -1862,8 +1610,7 @@ }, "DeleteLocationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTaskRequest":{ "type":"structure", @@ -1878,8 +1625,7 @@ }, "DeleteTaskResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAgentRequest":{ "type":"structure", @@ -1930,45 +1676,6 @@ }, "documentation":"

    DescribeAgentResponse

    " }, - "DescribeDiscoveryJobRequest":{ - "type":"structure", - "required":["DiscoveryJobArn"], - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that you want information about.

    " - } - } - }, - "DescribeDiscoveryJobResponse":{ - "type":"structure", - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    The ARN of the on-premises storage system you're running the discovery job on.

    " - }, - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    The ARN of the discovery job.

    " - }, - "CollectionDurationMinutes":{ - "shape":"CollectionDurationMinutes", - "documentation":"

    The number of minutes that the discovery job runs.

    " - }, - "Status":{ - "shape":"DiscoveryJobStatus", - "documentation":"

    Indicates the status of a discovery job. For more information, see Discovery job statuses.

    " - }, - "JobStartTime":{ - "shape":"DiscoveryTime", - "documentation":"

    The time when the discovery job started.

    " - }, - "JobEndTime":{ - "shape":"DiscoveryTime", - "documentation":"

    The time when the discovery job ended.

    " - } - } - }, "DescribeLocationAzureBlobRequest":{ "type":"structure", "required":["LocationArn"], @@ -2009,6 +1716,18 @@ "CreationTime":{ "shape":"Time", "documentation":"

    The time that your Azure Blob Storage transfer location was created.

    " + }, + "ManagedSecretConfig":{ + "shape":"ManagedSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token that DataSync uses to access a specific storage location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Describes configuration information for a customer-managed secret, such as an authentication token that DataSync uses to access a specific storage location, with a customer-managed KMS key.

    " } } }, @@ -2341,6 +2060,18 @@ "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    The certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA).

    " + }, + "ManagedSecretConfig":{ + "shape":"ManagedSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Describes configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Describes configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } }, "documentation":"

    DescribeLocationObjectStorageResponse

    " @@ -2440,216 +2171,64 @@ }, "documentation":"

    DescribeLocationSmbResponse

    " }, - "DescribeStorageSystemRequest":{ + "DescribeTaskExecutionRequest":{ "type":"structure", - "required":["StorageSystemArn"], + "required":["TaskExecutionArn"], "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of an on-premises storage system that you're using with DataSync Discovery.

    " + "TaskExecutionArn":{ + "shape":"TaskExecutionArn", + "documentation":"

    Specifies the Amazon Resource Name (ARN) of the task execution that you want information about.

    " } - } + }, + "documentation":"

    DescribeTaskExecutionRequest

    " }, - "DescribeStorageSystemResourceMetricsRequest":{ + "DescribeTaskExecutionResponse":{ "type":"structure", - "required":[ - "DiscoveryJobArn", - "ResourceType", - "ResourceId" - ], "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that collects information about your on-premises storage system.

    " + "TaskExecutionArn":{ + "shape":"TaskExecutionArn", + "documentation":"

    The ARN of the task execution that you wanted information about. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

    For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

    " }, - "ResourceType":{ - "shape":"DiscoveryResourceType", - "documentation":"

    Specifies the kind of storage system resource that you want information about.

    " + "Status":{ + "shape":"TaskExecutionStatus", + "documentation":"

    The status of the task execution.

    " }, - "ResourceId":{ - "shape":"ResourceId", - "documentation":"

    Specifies the universally unique identifier (UUID) of the storage system resource that you want information about.

    " + "Options":{"shape":"Options"}, + "Excludes":{ + "shape":"FilterList", + "documentation":"

    A list of filter rules that exclude specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " }, - "StartTime":{ - "shape":"DiscoveryTime", - "documentation":"

    Specifies a time within the total duration that the discovery job ran. To see information gathered during a certain time frame, use this parameter with EndTime.

    " + "Includes":{ + "shape":"FilterList", + "documentation":"

    A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " }, - "EndTime":{ - "shape":"DiscoveryTime", - "documentation":"

    Specifies a time within the total duration that the discovery job ran. To see information gathered during a certain time frame, use this parameter with StartTime.

    " + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

    The configuration of the manifest that lists the files or objects to transfer. For more information, see Specifying what DataSync transfers by using a manifest.

    " }, - "MaxResults":{ - "shape":"DiscoveryMaxResults", - "documentation":"

    Specifies how many results that you want in the response.

    " + "StartTime":{ + "shape":"Time", + "documentation":"

    The time that DataSync sends the request to start the task execution. For non-queued tasks, LaunchTime and StartTime are typically the same. For queued tasks, LaunchTime is typically later than StartTime because previously queued tasks must finish running before newer tasks can begin.

    " }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    Specifies an opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "DescribeStorageSystemResourceMetricsResponse":{ - "type":"structure", - "members":{ - "Metrics":{ - "shape":"Metrics", - "documentation":"

    The details that your discovery job collected about your storage system resource.

    " + "EstimatedFilesToTransfer":{ + "shape":"long", + "documentation":"

    The number of files, objects, and directories that DataSync expects to transfer over the network. This value is calculated while DataSync prepares the transfer.

    How this gets calculated depends primarily on your task’s transfer mode configuration:

    • If TransferMode is set to CHANGED - The calculation is based on comparing the content of the source and destination locations and determining the difference that needs to be transferred. The difference can include:

      • Anything that's added or modified at the source location.

      • Anything that's in both locations and modified at the destination after an initial transfer (unless OverwriteMode is set to NEVER).

      • (Basic task mode only) The number of items that DataSync expects to delete (if PreserveDeletedFiles is set to REMOVE).

    • If TransferMode is set to ALL - The calculation is based only on the items that DataSync finds at the source location.

    " }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    The opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "DescribeStorageSystemResourcesRequest":{ - "type":"structure", - "required":[ - "DiscoveryJobArn", - "ResourceType" - ], - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that's collecting data from your on-premises storage system.

    " + "EstimatedBytesToTransfer":{ + "shape":"long", + "documentation":"

    The number of logical bytes that DataSync expects to write to the destination location.

    " }, - "ResourceType":{ - "shape":"DiscoveryResourceType", - "documentation":"

    Specifies what kind of storage system resources that you want information about.

    " + "FilesTransferred":{ + "shape":"long", + "documentation":"

    The number of files, objects, and directories that DataSync actually transfers over the network. This value is updated periodically during your task execution when something is read from the source and sent over the network.

    If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an exact indication of what's transferring or to monitor your task execution.

    " }, - "ResourceIds":{ - "shape":"ResourceIds", - "documentation":"

    Specifies the universally unique identifiers (UUIDs) of the storage system resources that you want information about. You can't use this parameter in combination with the Filter parameter.

    " + "BytesWritten":{ + "shape":"long", + "documentation":"

    The number of logical bytes that DataSync actually writes to the destination location.

    " }, - "Filter":{ - "shape":"ResourceFilters", - "documentation":"

    Filters the storage system resources that you want returned. For example, this might be volumes associated with a specific storage virtual machine (SVM).

    " - }, - "MaxResults":{ - "shape":"DiscoveryMaxResults", - "documentation":"

    Specifies the maximum number of storage system resources that you want to list in a response.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    Specifies an opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "DescribeStorageSystemResourcesResponse":{ - "type":"structure", - "members":{ - "ResourceDetails":{ - "shape":"ResourceDetails", - "documentation":"

    The information collected about your storage system's resources. A response can also include Amazon Web Services storage service recommendations.

    For more information, see storage resource information collected by and recommendations provided by DataSync Discovery.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    The opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "DescribeStorageSystemResponse":{ - "type":"structure", - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    The ARN of the on-premises storage system that the discovery job looked at.

    " - }, - "ServerConfiguration":{ - "shape":"DiscoveryServerConfiguration", - "documentation":"

    The server name and network port required to connect with your on-premises storage system's management interface.

    " - }, - "SystemType":{ - "shape":"DiscoverySystemType", - "documentation":"

    The type of on-premises storage system.

    DataSync Discovery currently only supports NetApp Fabric-Attached Storage (FAS) and All Flash FAS (AFF) systems running ONTAP 9.7 or later.

    " - }, - "AgentArns":{ - "shape":"DiscoveryAgentArnList", - "documentation":"

    The ARN of the DataSync agent that connects to and reads from your on-premises storage system.

    " - }, - "Name":{ - "shape":"Name", - "documentation":"

    The name that you gave your on-premises storage system when adding it to DataSync Discovery.

    " - }, - "ErrorMessage":{ - "shape":"ErrorMessage", - "documentation":"

    Describes the connectivity error that the DataSync agent is encountering with your on-premises storage system.

    " - }, - "ConnectivityStatus":{ - "shape":"StorageSystemConnectivityStatus", - "documentation":"

    Indicates whether your DataSync agent can connect to your on-premises storage system.

    " - }, - "CloudWatchLogGroupArn":{ - "shape":"LogGroupArn", - "documentation":"

    The ARN of the Amazon CloudWatch log group that's used to monitor and log discovery job events.

    " - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

    The time when you added the on-premises storage system to DataSync Discovery.

    " - }, - "SecretsManagerArn":{ - "shape":"SecretsManagerArn", - "documentation":"

    The ARN of the secret that stores your on-premises storage system's credentials. DataSync Discovery stores these credentials in Secrets Manager.

    " - } - } - }, - "DescribeTaskExecutionRequest":{ - "type":"structure", - "required":["TaskExecutionArn"], - "members":{ - "TaskExecutionArn":{ - "shape":"TaskExecutionArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the task execution that you want information about.

    " - } - }, - "documentation":"

    DescribeTaskExecutionRequest

    " - }, - "DescribeTaskExecutionResponse":{ - "type":"structure", - "members":{ - "TaskExecutionArn":{ - "shape":"TaskExecutionArn", - "documentation":"

    The ARN of the task execution that you wanted information about. TaskExecutionArn is hierarchical and includes TaskArn for the task that was executed.

    For example, a TaskExecution value with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2/execution/exec-08ef1e88ec491019b executed the task with the ARN arn:aws:datasync:us-east-1:111222333444:task/task-0208075f79cedf4a2.

    " - }, - "Status":{ - "shape":"TaskExecutionStatus", - "documentation":"

    The status of the task execution.

    " - }, - "Options":{"shape":"Options"}, - "Excludes":{ - "shape":"FilterList", - "documentation":"

    A list of filter rules that exclude specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " - }, - "Includes":{ - "shape":"FilterList", - "documentation":"

    A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

    " - }, - "ManifestConfig":{ - "shape":"ManifestConfig", - "documentation":"

    The configuration of the manifest that lists the files or objects to transfer. For more information, see Specifying what DataSync transfers by using a manifest.

    " - }, - "StartTime":{ - "shape":"Time", - "documentation":"

    The time when the task execution started.

    " - }, - "EstimatedFilesToTransfer":{ - "shape":"long", - "documentation":"

    The number of files, objects, and directories that DataSync expects to transfer over the network. This value is calculated while DataSync prepares the transfer.

    How this gets calculated depends primarily on your task’s transfer mode configuration:

    • If TransferMode is set to CHANGED - The calculation is based on comparing the content of the source and destination locations and determining the difference that needs to be transferred. The difference can include:

      • Anything that's added or modified at the source location.

      • Anything that's in both locations and modified at the destination after an initial transfer (unless OverwriteMode is set to NEVER).

      • (Basic task mode only) The number of items that DataSync expects to delete (if PreserveDeletedFiles is set to REMOVE).

    • If TransferMode is set to ALL - The calculation is based only on the items that DataSync finds at the source location.

    " - }, - "EstimatedBytesToTransfer":{ - "shape":"long", - "documentation":"

    The number of logical bytes that DataSync expects to write to the destination location.

    " - }, - "FilesTransferred":{ - "shape":"long", - "documentation":"

    The number of files, objects, and directories that DataSync actually transfers over the network. This value is updated periodically during your task execution when something is read from the source and sent over the network.

    If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an exact indication of what's transferring or to monitor your task execution.

    " - }, - "BytesWritten":{ - "shape":"long", - "documentation":"

    The number of logical bytes that DataSync actually writes to the destination location.

    " - }, - "BytesTransferred":{ - "shape":"long", - "documentation":"

    The number of bytes that DataSync sends to the network before compression (if compression is possible). For the number of bytes transferred over the network, see BytesCompressed.

    " + "BytesTransferred":{ + "shape":"long", + "documentation":"

    The number of bytes that DataSync sends to the network before compression (if compression is possible). For the number of bytes transferred over the network, see BytesCompressed.

    " }, "BytesCompressed":{ "shape":"long", @@ -2698,6 +2277,14 @@ "FilesFailed":{ "shape":"TaskExecutionFilesFailedDetail", "documentation":"

    The number of objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

    Applies only to Enhanced mode tasks.

    " + }, + "LaunchTime":{ + "shape":"Time", + "documentation":"

    The time that the task execution actually begins. For non-queued tasks, LaunchTime and StartTime are typically the same. For queued tasks, LaunchTime is typically later than StartTime because previously queued tasks must finish running before newer tasks can begin.

    " + }, + "EndTime":{ + "shape":"Time", + "documentation":"

    The time that the transfer task ends.

    " } }, "documentation":"

    DescribeTaskExecutionResponse

    " @@ -2803,100 +2390,6 @@ "type":"list", "member":{"shape":"NetworkInterfaceArn"} }, - "DiscoveryAgentArnList":{ - "type":"list", - "member":{"shape":"AgentArn"}, - "max":1, - "min":1 - }, - "DiscoveryJobArn":{ - "type":"string", - "max":256, - "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:system/storage-system-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/job/discovery-job-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" - }, - "DiscoveryJobList":{ - "type":"list", - "member":{"shape":"DiscoveryJobListEntry"} - }, - "DiscoveryJobListEntry":{ - "type":"structure", - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    The Amazon Resource Name (ARN) of a discovery job.

    " - }, - "Status":{ - "shape":"DiscoveryJobStatus", - "documentation":"

    The status of a discovery job. For more information, see Discovery job statuses.

    " - } - }, - "documentation":"

    The details about a specific DataSync discovery job.

    " - }, - "DiscoveryJobStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "WARNING", - "TERMINATED", - "FAILED", - "STOPPED", - "COMPLETED", - "COMPLETED_WITH_ISSUES" - ] - }, - "DiscoveryMaxResults":{ - "type":"integer", - "max":100, - "min":1 - }, - "DiscoveryNextToken":{ - "type":"string", - "max":65535, - "pattern":"[a-zA-Z0-9=_-]+" - }, - "DiscoveryResourceFilter":{ - "type":"string", - "enum":["SVM"] - }, - "DiscoveryResourceType":{ - "type":"string", - "enum":[ - "SVM", - "VOLUME", - "CLUSTER" - ] - }, - "DiscoveryServerConfiguration":{ - "type":"structure", - "required":["ServerHostname"], - "members":{ - "ServerHostname":{ - "shape":"DiscoveryServerHostname", - "documentation":"

    The domain name or IP address of your storage system's management interface.

    " - }, - "ServerPort":{ - "shape":"DiscoveryServerPort", - "documentation":"

    The network port for accessing the storage system's management interface.

    " - } - }, - "documentation":"

    The network settings that DataSync Discovery uses to connect with your on-premises storage system's management interface.

    " - }, - "DiscoveryServerHostname":{ - "type":"string", - "max":255, - "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])$" - }, - "DiscoveryServerPort":{ - "type":"integer", - "box":true, - "max":65535, - "min":1 - }, - "DiscoverySystemType":{ - "type":"string", - "enum":["NetAppONTAP"] - }, - "DiscoveryTime":{"type":"timestamp"}, "DnsIpList":{ "type":"list", "member":{"shape":"ServerIpAddress"}, @@ -2962,10 +2455,6 @@ "max":4096, "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" }, - "EnabledProtocols":{ - "type":"list", - "member":{"shape":"PtolemyString"} - }, "Endpoint":{ "type":"string", "max":15, @@ -2980,11 +2469,6 @@ "FIPS" ] }, - "ErrorMessage":{ - "type":"string", - "max":128, - "pattern":".*" - }, "FilterAttributeValue":{ "type":"string", "max":255, @@ -2997,10 +2481,6 @@ "max":1, "min":0 }, - "FilterMembers":{ - "type":"list", - "member":{"shape":"PtolemyString"} - }, "FilterRule":{ "type":"structure", "members":{ @@ -3129,33 +2609,6 @@ "max":4096, "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" }, - "GenerateRecommendationsRequest":{ - "type":"structure", - "required":[ - "DiscoveryJobArn", - "ResourceIds", - "ResourceType" - ], - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that collects information about your on-premises storage system.

    " - }, - "ResourceIds":{ - "shape":"ResourceIds", - "documentation":"

    Specifies the universally unique identifiers (UUIDs) of the resources in your storage system that you want recommendations on.

    " - }, - "ResourceType":{ - "shape":"DiscoveryResourceType", - "documentation":"

    Specifies the type of resource in your storage system that you want recommendations on.

    " - } - } - }, - "GenerateRecommendationsResponse":{ - "type":"structure", - "members":{ - } - }, "Gid":{ "type":"string", "enum":[ @@ -3248,33 +2701,16 @@ "min":1, "pattern":"^[_.A-Za-z0-9][-_.A-Za-z0-9]*$" }, - "IOPS":{ - "type":"structure", - "members":{ - "Read":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS related to read operations.

    " - }, - "Write":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS related to write operations.

    " - }, - "Other":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS unrelated to read and write operations.

    " - }, - "Total":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak total IOPS on your on-premises storage system resource.

    " - } - }, - "documentation":"

    The IOPS peaks for an on-premises storage system resource. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - }, "IamRoleArn":{ "type":"string", "max":2048, "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$" }, + "IamRoleArnOrEmptyString":{ + "type":"string", + "max":2048, + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/[a-zA-Z0-9+=,.@_-]+|)$" + }, "InputTagList":{ "type":"list", "member":{"shape":"TagListEntry"}, @@ -3315,30 +2751,17 @@ "min":1, "pattern":"^.+$" }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):kms:[a-z\\-0-9]+:[0-9]{12}:key/.*|)$" + }, "KmsKeyProviderUri":{ "type":"string", "max":255, "min":1, "pattern":"^kms:\\/\\/http[s]?@(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(;(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9]))*:[0-9]{1,5}\\/kms$" }, - "Latency":{ - "type":"structure", - "members":{ - "Read":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for read operations.

    " - }, - "Write":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for write operations.

    " - }, - "Other":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for operations unrelated to read and write operations.

    " - } - }, - "documentation":"

    The latency peaks for an on-premises storage system resource. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - }, "ListAgentsRequest":{ "type":"structure", "members":{ @@ -3367,36 +2790,6 @@ }, "documentation":"

    ListAgentsResponse

    " }, - "ListDiscoveryJobsRequest":{ - "type":"structure", - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of an on-premises storage system. Use this parameter if you only want to list the discovery jobs that are associated with a specific storage system.

    " - }, - "MaxResults":{ - "shape":"DiscoveryMaxResults", - "documentation":"

    Specifies how many results you want in the response.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    Specifies an opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "ListDiscoveryJobsResponse":{ - "type":"structure", - "members":{ - "DiscoveryJobs":{ - "shape":"DiscoveryJobList", - "documentation":"

    The discovery jobs that you've run.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    The opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, "ListLocationsRequest":{ "type":"structure", "members":{ @@ -3429,32 +2822,6 @@ }, "documentation":"

    ListLocationsResponse

    " }, - "ListStorageSystemsRequest":{ - "type":"structure", - "members":{ - "MaxResults":{ - "shape":"DiscoveryMaxResults", - "documentation":"

    Specifies how many results you want in the response.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    Specifies an opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, - "ListStorageSystemsResponse":{ - "type":"structure", - "members":{ - "StorageSystems":{ - "shape":"StorageSystemList", - "documentation":"

    The Amazon Resource Names (ARNs) of the on-premises storage systems that you're using with DataSync Discovery.

    " - }, - "NextToken":{ - "shape":"DiscoveryNextToken", - "documentation":"

    The opaque string that indicates the position to begin the next list of results in the response.

    " - } - } - }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -3628,6 +2995,16 @@ "TRANSFER" ] }, + "ManagedSecretConfig":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"SecretArn", + "documentation":"

    Specifies the ARN for an Secrets Manager secret.

    " + } + }, + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

    " + }, "ManifestAction":{ "type":"string", "enum":["TRANSFER"] @@ -3654,65 +3031,11 @@ "type":"string", "enum":["CSV"] }, - "MaxP95Performance":{ - "type":"structure", - "members":{ - "IopsRead":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS related to read operations.

    " - }, - "IopsWrite":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS related to write operations.

    " - }, - "IopsOther":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak IOPS unrelated to read and write operations.

    " - }, - "IopsTotal":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak total IOPS on your on-premises storage system resource.

    " - }, - "ThroughputRead":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput related to read operations.

    " - }, - "ThroughputWrite":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput related to write operations.

    " - }, - "ThroughputOther":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput unrelated to read and write operations.

    " - }, - "ThroughputTotal":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak total throughput on your on-premises storage system resource.

    " - }, - "LatencyRead":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for read operations.

    " - }, - "LatencyWrite":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for write operations.

    " - }, - "LatencyOther":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak latency for operations unrelated to read and write operations.

    " - } - }, - "documentation":"

    The performance data that DataSync Discovery collects about an on-premises storage system resource.

    " - }, "MaxResults":{ "type":"integer", "max":100, "min":0 }, - "Metrics":{ - "type":"list", - "member":{"shape":"ResourceMetrics"} - }, "Mtime":{ "type":"string", "enum":[ @@ -3720,206 +3043,6 @@ "PRESERVE" ] }, - "Name":{ - "type":"string", - "max":256, - "min":1, - "pattern":"^[\\p{L}\\p{M}\\p{N}\\s+=._:@\\/-]+$" - }, - "NetAppONTAPCluster":{ - "type":"structure", - "members":{ - "CifsShareCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of CIFS shares in the cluster.

    " - }, - "NfsExportedVolumes":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of NFS volumes in the cluster.

    " - }, - "ResourceId":{ - "shape":"PtolemyUUID", - "documentation":"

    The universally unique identifier (UUID) of the cluster.

    " - }, - "ClusterName":{ - "shape":"PtolemyString", - "documentation":"

    The name of the cluster.

    " - }, - "MaxP95Performance":{ - "shape":"MaxP95Performance", - "documentation":"

    The performance data that DataSync Discovery collects about the cluster.

    " - }, - "ClusterBlockStorageSize":{ - "shape":"NonNegativeLong", - "documentation":"

    The total storage space that's available in the cluster.

    " - }, - "ClusterBlockStorageUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in a cluster.

    " - }, - "ClusterBlockStorageLogicalUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in the cluster without accounting for compression or deduplication.

    " - }, - "Recommendations":{ - "shape":"Recommendations", - "documentation":"

    The Amazon Web Services storage services that DataSync Discovery recommends for the cluster. For more information, see Recommendations provided by DataSync Discovery.

    " - }, - "RecommendationStatus":{ - "shape":"RecommendationStatus", - "documentation":"

    Indicates whether DataSync Discovery recommendations for the cluster are ready to view, incomplete, or can't be determined.

    For more information, see Recommendation statuses.

    " - }, - "LunCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of LUNs (logical unit numbers) in the cluster.

    " - }, - "ClusterCloudStorageUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of space in the cluster that's in cloud storage (for example, if you're using data tiering).

    " - } - }, - "documentation":"

    The information that DataSync Discovery collects about an on-premises storage system cluster.

    " - }, - "NetAppONTAPClusters":{ - "type":"list", - "member":{"shape":"NetAppONTAPCluster"} - }, - "NetAppONTAPSVM":{ - "type":"structure", - "members":{ - "ClusterUuid":{ - "shape":"PtolemyUUID", - "documentation":"

    The universally unique identifier (UUID) of the cluster associated with the SVM.

    " - }, - "ResourceId":{ - "shape":"PtolemyUUID", - "documentation":"

    The UUID of the SVM.

    " - }, - "SvmName":{ - "shape":"PtolemyString", - "documentation":"

    The name of the SVM

    " - }, - "CifsShareCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of CIFS shares in the SVM.

    " - }, - "EnabledProtocols":{ - "shape":"EnabledProtocols", - "documentation":"

    The data transfer protocols (such as NFS) configured for the SVM.

    " - }, - "TotalCapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in the SVM.

    " - }, - "TotalCapacityProvisioned":{ - "shape":"NonNegativeLong", - "documentation":"

    The total storage space that's available in the SVM.

    " - }, - "TotalLogicalCapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in the SVM without accounting for compression or deduplication.

    " - }, - "MaxP95Performance":{ - "shape":"MaxP95Performance", - "documentation":"

    The performance data that DataSync Discovery collects about the SVM.

    " - }, - "Recommendations":{ - "shape":"Recommendations", - "documentation":"

    The Amazon Web Services storage services that DataSync Discovery recommends for the SVM. For more information, see Recommendations provided by DataSync Discovery.

    " - }, - "NfsExportedVolumes":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of NFS volumes in the SVM.

    " - }, - "RecommendationStatus":{ - "shape":"RecommendationStatus", - "documentation":"

    Indicates whether DataSync Discovery recommendations for the SVM are ready to view, incomplete, or can't be determined.

    For more information, see Recommendation statuses.

    " - }, - "TotalSnapshotCapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of storage in the SVM that's being used for snapshots.

    " - }, - "LunCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of LUNs (logical unit numbers) in the SVM.

    " - } - }, - "documentation":"

    The information that DataSync Discovery collects about a storage virtual machine (SVM) in your on-premises storage system.

    " - }, - "NetAppONTAPSVMs":{ - "type":"list", - "member":{"shape":"NetAppONTAPSVM"} - }, - "NetAppONTAPVolume":{ - "type":"structure", - "members":{ - "VolumeName":{ - "shape":"PtolemyString", - "documentation":"

    The name of the volume.

    " - }, - "ResourceId":{ - "shape":"PtolemyUUID", - "documentation":"

    The universally unique identifier (UUID) of the volume.

    " - }, - "CifsShareCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of CIFS shares in the volume.

    " - }, - "SecurityStyle":{ - "shape":"PtolemyString", - "documentation":"

    The volume's security style (such as Unix or NTFS).

    " - }, - "SvmUuid":{ - "shape":"PtolemyUUID", - "documentation":"

    The UUID of the storage virtual machine (SVM) associated with the volume.

    " - }, - "SvmName":{ - "shape":"PtolemyString", - "documentation":"

    The name of the SVM associated with the volume.

    " - }, - "CapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in the volume.

    " - }, - "CapacityProvisioned":{ - "shape":"NonNegativeLong", - "documentation":"

    The total storage space that's available in the volume.

    " - }, - "LogicalCapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The storage space that's being used in the volume without accounting for compression or deduplication.

    " - }, - "NfsExported":{ - "shape":"PtolemyBoolean", - "documentation":"

    The number of NFS volumes in the volume.

    " - }, - "SnapshotCapacityUsed":{ - "shape":"NonNegativeLong", - "documentation":"

    The amount of storage in the volume that's being used for snapshots.

    " - }, - "MaxP95Performance":{ - "shape":"MaxP95Performance", - "documentation":"

    The performance data that DataSync Discovery collects about the volume.

    " - }, - "Recommendations":{ - "shape":"Recommendations", - "documentation":"

    The Amazon Web Services storage services that DataSync Discovery recommends for the volume. For more information, see Recommendations provided by DataSync Discovery.

    " - }, - "RecommendationStatus":{ - "shape":"RecommendationStatus", - "documentation":"

    Indicates whether DataSync Discovery recommendations for the volume are ready to view, incomplete, or can't be determined.

    For more information, see Recommendation statuses.

    " - }, - "LunCount":{ - "shape":"NonNegativeLong", - "documentation":"

    The number of LUNs (logical unit numbers) in the volume.

    " - } - }, - "documentation":"

    The information that DataSync Discovery collects about a volume in your on-premises storage system.

    " - }, - "NetAppONTAPVolumes":{ - "type":"list", - "member":{"shape":"NetAppONTAPVolume"} - }, "NetworkInterfaceArn":{ "type":"string", "max":128, @@ -3954,16 +3077,6 @@ "NFS4_1" ] }, - "NonNegativeDouble":{ - "type":"double", - "box":true, - "min":0 - }, - "NonNegativeLong":{ - "type":"long", - "box":true, - "min":0 - }, "ObjectStorageAccessKey":{ "type":"string", "max":200, @@ -4119,24 +3232,6 @@ "NEVER" ] }, - "P95Metrics":{ - "type":"structure", - "members":{ - "IOPS":{ - "shape":"IOPS", - "documentation":"

    The IOPS peaks for an on-premises storage system resource. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - }, - "Throughput":{ - "shape":"Throughput", - "documentation":"

    The throughput peaks for an on-premises storage system resource. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - }, - "Latency":{ - "shape":"Latency", - "documentation":"

    The latency peaks for an on-premises storage system resource. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - } - }, - "documentation":"

    The types of performance data that DataSync Discovery collects about an on-premises storage system resource.

    " - }, "PLSecurityGroupArnList":{ "type":"list", "member":{"shape":"Ec2SecurityGroupArn"}, @@ -4210,28 +3305,6 @@ }, "documentation":"

    Specifies how your DataSync agent connects to Amazon Web Services using a virtual private cloud (VPC) service endpoint. An agent that uses a VPC endpoint isn't accessible over the public internet.

    " }, - "PtolemyBoolean":{"type":"boolean"}, - "PtolemyPassword":{ - "type":"string", - "max":1024, - "pattern":"^(?!.*[:\\\"][^:\"]*$).+$", - "sensitive":true - }, - "PtolemyString":{ - "type":"string", - "max":1024, - "pattern":"^.{0,1024}$" - }, - "PtolemyUUID":{ - "type":"string", - "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" - }, - "PtolemyUsername":{ - "type":"string", - "max":1024, - "pattern":"^(?!.*[:\\\"][^:\"]*$).+$", - "sensitive":true - }, "QopConfiguration":{ "type":"structure", "members":{ @@ -4246,57 +3319,6 @@ }, "documentation":"

    The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer privacy settings configured on the Hadoop Distributed File System (HDFS) cluster.

    " }, - "Recommendation":{ - "type":"structure", - "members":{ - "StorageType":{ - "shape":"PtolemyString", - "documentation":"

    A recommended Amazon Web Services storage service that you can migrate data to based on information that DataSync Discovery collects about your on-premises storage system.

    " - }, - "StorageConfiguration":{ - "shape":"RecommendationsConfigMap", - "documentation":"

    Information about how you can set up a recommended Amazon Web Services storage service.

    " - }, - "EstimatedMonthlyStorageCost":{ - "shape":"PtolemyString", - "documentation":"

    The estimated monthly cost of the recommended Amazon Web Services storage service.

    " - } - }, - "documentation":"

    The details about an Amazon Web Services storage service that DataSync Discovery recommends for a resource in your on-premises storage system.

    For more information, see Recommendations provided by DataSync Discovery.

    " - }, - "RecommendationStatus":{ - "type":"string", - "enum":[ - "NONE", - "IN_PROGRESS", - "COMPLETED", - "FAILED" - ] - }, - "Recommendations":{ - "type":"list", - "member":{"shape":"Recommendation"} - }, - "RecommendationsConfigMap":{ - "type":"map", - "key":{"shape":"PtolemyString"}, - "value":{"shape":"PtolemyString"} - }, - "RemoveStorageSystemRequest":{ - "type":"structure", - "required":["StorageSystemArn"], - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the storage system that you want to permanently remove from DataSync Discovery.

    " - } - } - }, - "RemoveStorageSystemResponse":{ - "type":"structure", - "members":{ - } - }, "ReportDestination":{ "type":"structure", "members":{ @@ -4393,65 +3415,6 @@ }, "documentation":"

    Indicates whether DataSync created a complete task report for your transfer.

    " }, - "ResourceDetails":{ - "type":"structure", - "members":{ - "NetAppONTAPSVMs":{ - "shape":"NetAppONTAPSVMs", - "documentation":"

    The information that DataSync Discovery collects about storage virtual machines (SVMs) in your on-premises storage system.

    " - }, - "NetAppONTAPVolumes":{ - "shape":"NetAppONTAPVolumes", - "documentation":"

    The information that DataSync Discovery collects about volumes in your on-premises storage system.

    " - }, - "NetAppONTAPClusters":{ - "shape":"NetAppONTAPClusters", - "documentation":"

    The information that DataSync Discovery collects about the cluster in your on-premises storage system.

    " - } - }, - "documentation":"

    Information provided by DataSync Discovery about the resources in your on-premises storage system.

    " - }, - "ResourceFilters":{ - "type":"map", - "key":{"shape":"DiscoveryResourceFilter"}, - "value":{"shape":"FilterMembers"} - }, - "ResourceId":{ - "type":"string", - "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" - }, - "ResourceIds":{ - "type":"list", - "member":{"shape":"ResourceId"}, - "max":100, - "min":1 - }, - "ResourceMetrics":{ - "type":"structure", - "members":{ - "Timestamp":{ - "shape":"Timestamp", - "documentation":"

    The time when DataSync Discovery collected this information from the resource.

    " - }, - "P95Metrics":{ - "shape":"P95Metrics", - "documentation":"

    The types of performance data that DataSync Discovery collects about the on-premises storage system resource.

    " - }, - "Capacity":{ - "shape":"Capacity", - "documentation":"

    The storage capacity of the on-premises storage system resource.

    " - }, - "ResourceId":{ - "shape":"ResourceId", - "documentation":"

    The universally unique identifier (UUID) of the on-premises storage system resource.

    " - }, - "ResourceType":{ - "shape":"DiscoveryResourceType", - "documentation":"

    The type of on-premises storage system resource.

    " - } - }, - "documentation":"

    Information, including performance data and capacity usage, provided by DataSync Discovery about a resource in your on-premises storage system.

    " - }, "S3BucketArn":{ "type":"string", "max":268, @@ -4543,10 +3506,10 @@ "DISABLED" ] }, - "SecretsManagerArn":{ + "SecretArn":{ "type":"string", "max":2048, - "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):secretsmanager:[a-z\\-0-9]+:[0-9]{12}:secret:.*" + "pattern":"^(arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):secretsmanager:[a-z\\-0-9]+:[0-9]{12}:secret:.*|)$" }, "ServerHostname":{ "type":"string", @@ -4630,42 +3593,6 @@ "type":"list", "member":{"shape":"NetworkInterfaceArn"} }, - "StartDiscoveryJobRequest":{ - "type":"structure", - "required":[ - "StorageSystemArn", - "CollectionDurationMinutes", - "ClientToken" - ], - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the on-premises storage system that you want to run the discovery job on.

    " - }, - "CollectionDurationMinutes":{ - "shape":"CollectionDurationMinutes", - "documentation":"

    Specifies in minutes how long you want the discovery job to run.

    For more accurate recommendations, we recommend a duration of at least 14 days. Longer durations allow time to collect a sufficient number of data points and provide a realistic representation of storage performance and utilization.

    " - }, - "ClientToken":{ - "shape":"PtolemyUUID", - "documentation":"

    Specifies a client token to make sure requests with this API operation are idempotent. If you don't specify a client token, DataSync generates one for you automatically.

    ", - "idempotencyToken":true - }, - "Tags":{ - "shape":"InputTagList", - "documentation":"

    Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources.

    " - } - } - }, - "StartDiscoveryJobResponse":{ - "type":"structure", - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    The ARN of the discovery job that you started.

    " - } - } - }, "StartTaskExecutionRequest":{ "type":"structure", "required":["TaskArn"], @@ -4708,52 +3635,6 @@ }, "documentation":"

    StartTaskExecutionResponse

    " }, - "StopDiscoveryJobRequest":{ - "type":"structure", - "required":["DiscoveryJobArn"], - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that you want to stop.

    " - } - } - }, - "StopDiscoveryJobResponse":{ - "type":"structure", - "members":{ - } - }, - "StorageSystemArn":{ - "type":"string", - "max":128, - "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:system/storage-system-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" - }, - "StorageSystemConnectivityStatus":{ - "type":"string", - "enum":[ - "PASS", - "FAIL", - "UNKNOWN" - ] - }, - "StorageSystemList":{ - "type":"list", - "member":{"shape":"StorageSystemListEntry"} - }, - "StorageSystemListEntry":{ - "type":"structure", - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    The Amazon Resource Names (ARN) of an on-premises storage system that you added to DataSync Discovery.

    " - }, - "Name":{ - "shape":"Name", - "documentation":"

    The name of an on-premises storage system that you added to DataSync Discovery.

    " - } - }, - "documentation":"

    Information that identifies an on-premises storage system that you're using with DataSync Discovery.

    " - }, "StorageVirtualMachineArn":{ "type":"string", "max":162, @@ -4806,8 +3687,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -5086,30 +3966,7 @@ "UNAVAILABLE" ] }, - "Throughput":{ - "type":"structure", - "members":{ - "Read":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput related to read operations.

    " - }, - "Write":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput related to write operations.

    " - }, - "Other":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak throughput unrelated to read and write operations.

    " - }, - "Total":{ - "shape":"NonNegativeDouble", - "documentation":"

    Peak total throughput on your on-premises storage system resource.

    " - } - }, - "documentation":"

    The throughput peaks for an on-premises storage system volume. Each data point represents the 95th percentile peak value during a 1-hour interval.

    " - }, "Time":{"type":"timestamp"}, - "Timestamp":{"type":"timestamp"}, "TransferMode":{ "type":"string", "enum":[ @@ -5146,8 +4003,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAgentRequest":{ "type":"structure", @@ -5166,30 +4022,7 @@ }, "UpdateAgentResponse":{ "type":"structure", - "members":{ - } - }, - "UpdateDiscoveryJobRequest":{ - "type":"structure", - "required":[ - "DiscoveryJobArn", - "CollectionDurationMinutes" - ], - "members":{ - "DiscoveryJobArn":{ - "shape":"DiscoveryJobArn", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the discovery job that you want to update.

    " - }, - "CollectionDurationMinutes":{ - "shape":"CollectionDurationMinutes", - "documentation":"

    Specifies in minutes how long that you want the discovery job to run. (You can't set this parameter to less than the number of minutes that the job has already run for.)

    " - } - } - }, - "UpdateDiscoveryJobResponse":{ - "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationAzureBlobRequest":{ "type":"structure", @@ -5221,14 +4054,21 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect with your Azure Blob Storage container. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You can specify more than one agent. For more information, see Using multiple agents for your transfer.

    You cannot add or remove agents from a storage location after you initially create it.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } } }, "UpdateLocationAzureBlobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationEfsRequest":{ "type":"structure", @@ -5258,8 +4098,7 @@ }, "UpdateLocationEfsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationFsxLustreRequest":{ "type":"structure", @@ -5277,8 +4116,7 @@ }, "UpdateLocationFsxLustreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationFsxOntapRequest":{ "type":"structure", @@ -5300,8 +4138,7 @@ }, "UpdateLocationFsxOntapResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationFsxOpenZfsRequest":{ "type":"structure", @@ -5320,8 +4157,7 @@ }, "UpdateLocationFsxOpenZfsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationFsxWindowsRequest":{ "type":"structure", @@ -5351,8 +4187,7 @@ }, "UpdateLocationFsxWindowsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationHdfsRequest":{ "type":"structure", @@ -5400,7 +4235,7 @@ }, "KerberosKeytab":{ "shape":"KerberosKeytabFile", - "documentation":"

    The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you use the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.

    " + "documentation":"

    The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address.

    " }, "KerberosKrb5Conf":{ "shape":"KerberosKrb5ConfFile", @@ -5414,8 +4249,7 @@ }, "UpdateLocationHdfsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationNfsRequest":{ "type":"structure", @@ -5439,8 +4273,7 @@ }, "UpdateLocationNfsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationObjectStorageRequest":{ "type":"structure", @@ -5476,18 +4309,25 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

    Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.

    " + "documentation":"

    (Optional) Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system. If you are setting up an agentless cross-cloud transfer, you do not need to specify a value for this parameter.

    You cannot add or remove agents from a storage location after you initially create it.

    " }, "ServerCertificate":{ "shape":"ObjectStorageCertificate", "documentation":"

    Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single .pem file with a full certificate chain (for example, file:///home/user/.ssh/object_storage_certificates.pem).

    The certificate chain might include:

    • The object storage system's certificate

    • All intermediate certificates (if there are any)

    • The root certificate of the signing CA

    You can concatenate your certificates into a .pem file (which can be up to 32768 bytes before base64 encoding). The following example cat command creates an object_storage_certificates.pem file that includes three certificates:

    cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem

    To use this parameter, configure ServerProtocol to HTTPS.

    Updating this parameter doesn't interfere with tasks that you have in progress.

    " + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

    Specifies configuration information for a DataSync-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

    Specifies configuration information for a customer-managed secret, such as an authentication token or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

    " } } }, "UpdateLocationObjectStorageResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationS3Request":{ "type":"structure", @@ -5510,8 +4350,7 @@ }, "UpdateLocationS3Response":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLocationSmbRequest":{ "type":"structure", @@ -5560,7 +4399,7 @@ }, "KerberosKeytab":{ "shape":"KerberosKeytabFile", - "documentation":"

    Specifies your Kerberos key table (keytab) file, which includes mappings between your Kerberos principal and encryption keys.

    The file must be base64 encoded. If you're using the CLI, the encoding is done for you.

    To avoid task execution errors, make sure that the Kerberos principal that you use to create the keytab file matches exactly what you specify for KerberosPrincipal.

    " + "documentation":"

    Specifies your Kerberos key table (keytab) file, which includes mappings between your Kerberos principal and encryption keys.

    To avoid task execution errors, make sure that the Kerberos principal that you use to create the keytab file matches exactly what you specify for KerberosPrincipal.

    " }, "KerberosKrb5Conf":{ "shape":"KerberosKrb5ConfFile", @@ -5570,49 +4409,13 @@ }, "UpdateLocationSmbResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateSmbDomain":{ "type":"string", "max":253, "pattern":"^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$" }, - "UpdateStorageSystemRequest":{ - "type":"structure", - "required":["StorageSystemArn"], - "members":{ - "StorageSystemArn":{ - "shape":"StorageSystemArn", - "documentation":"

    Specifies the ARN of the on-premises storage system that you want reconfigure.

    " - }, - "ServerConfiguration":{ - "shape":"DiscoveryServerConfiguration", - "documentation":"

    Specifies the server name and network port required to connect with your on-premises storage system's management interface.

    " - }, - "AgentArns":{ - "shape":"DiscoveryAgentArnList", - "documentation":"

    Specifies the Amazon Resource Name (ARN) of the DataSync agent that connects to and reads your on-premises storage system. You can only specify one ARN.

    " - }, - "Name":{ - "shape":"Name", - "documentation":"

    Specifies a familiar name for your on-premises storage system.

    " - }, - "CloudWatchLogGroupArn":{ - "shape":"LogGroupArn", - "documentation":"

    Specifies the ARN of the Amazon CloudWatch log group for monitoring and logging discovery job events.

    " - }, - "Credentials":{ - "shape":"Credentials", - "documentation":"

    Specifies the user name and password for accessing your on-premises storage system's management interface.

    " - } - } - }, - "UpdateStorageSystemResponse":{ - "type":"structure", - "members":{ - } - }, "UpdateTaskExecutionRequest":{ "type":"structure", "required":[ @@ -5629,8 +4432,7 @@ }, "UpdateTaskExecutionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateTaskRequest":{ "type":"structure", @@ -5674,8 +4476,7 @@ }, "UpdateTaskResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdatedEfsAccessPointArn":{ "type":"string", @@ -5702,5 +4503,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"DataSync

    DataSync is an online data movement and discovery service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services.

    This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide .

    " + "documentation":"DataSync

    DataSync is an online data movement service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services.

    This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide .

    " } diff --git a/services/datazone/pom.xml b/services/datazone/pom.xml index e35c1d234a53..b857b4400e0f 100644 --- a/services/datazone/pom.xml +++ b/services/datazone/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT datazone AWS Java SDK :: Services :: Data Zone diff --git a/services/datazone/src/main/resources/codegen-resources/customization.config b/services/datazone/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/datazone/src/main/resources/codegen-resources/customization.config +++ b/services/datazone/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/datazone/src/main/resources/codegen-resources/service-2.json b/services/datazone/src/main/resources/codegen-resources/service-2.json index 5c4f47a75d34..d06628e74619 100644 --- a/services/datazone/src/main/resources/codegen-resources/service-2.json +++ b/services/datazone/src/main/resources/codegen-resources/service-2.json @@ -21987,6 +21987,10 @@ "location":"uri", "locationName":"domainIdentifier" }, + "domainUnitId":{ + "shape":"DomainUnitId", + "documentation":"

    The ID of the domain unit.

    " + }, "environmentDeploymentDetails":{ "shape":"EnvironmentDeploymentDetails", "documentation":"

    The environment deployment details of the project.

    " diff --git a/services/dax/pom.xml b/services/dax/pom.xml index f72282a546a3..25672cba0575 100644 --- a/services/dax/pom.xml +++ b/services/dax/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dax AWS Java SDK :: Services :: Amazon DynamoDB Accelerator (DAX) diff --git a/services/dax/src/main/resources/codegen-resources/customization.config b/services/dax/src/main/resources/codegen-resources/customization.config index b80e3a7bfa99..4f0fb398e0e7 100644 --- a/services/dax/src/main/resources/codegen-resources/customization.config +++ b/services/dax/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "describeParameterGroups", "describeSubnetGroups" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/deadline/pom.xml b/services/deadline/pom.xml index 6715f63cc557..1ab6eab3b22b 100644 --- a/services/deadline/pom.xml +++ b/services/deadline/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT deadline AWS Java SDK :: Services :: Deadline diff --git a/services/deadline/src/main/resources/codegen-resources/customization.config b/services/deadline/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/deadline/src/main/resources/codegen-resources/customization.config +++ b/services/deadline/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/deadline/src/main/resources/codegen-resources/service-2.json b/services/deadline/src/main/resources/codegen-resources/service-2.json index 902c43dd3db5..bcef824293a6 100644 --- a/services/deadline/src/main/resources/codegen-resources/service-2.json +++ b/services/deadline/src/main/resources/codegen-resources/service-2.json @@ -498,7 +498,7 @@ {"shape":"ConflictException"}, 
{"shape":"ValidationException"} ], - "documentation":"

    Creates a worker. A worker tells your instance how much processing power (vCPU), and memory (GiB) you’ll need to assemble the digital assets held within a particular instance. You can specify certain instance types to use, or let the worker know which instances types to exclude.

    ", + "documentation":"

    Creates a worker. A worker tells your instance how much processing power (vCPU), and memory (GiB) you’ll need to assemble the digital assets held within a particular instance. You can specify certain instance types to use, or let the worker know which instances types to exclude.

    Deadline Cloud limits the number of workers to less than or equal to the fleet's maximum worker count. The service maintains eventual consistency for the worker count. If you make multiple rapid calls to CreateWorker before the field updates, you might exceed your fleet's maximum worker count. For example, if your maxWorkerCount is 10 and you currently have 9 workers, making two quick CreateWorker calls might successfully create 2 workers instead of 1, resulting in 11 total workers.

    ", "endpoint":{"hostPrefix":"scheduling."}, "idempotent":true }, @@ -2286,11 +2286,11 @@ "members":{ "name":{ "shape":"AcceleratorName", - "documentation":"

    The name of the chip used by the GPU accelerator.

    If you specify l4 as the name of the accelerator, you must specify latest or grid:r550 as the runtime.

    The available GPU accelerators are:

    • t4 - NVIDIA T4 Tensor Core GPU

    • a10g - NVIDIA A10G Tensor Core GPU

    • l4 - NVIDIA L4 Tensor Core GPU

    • l40s - NVIDIA L40S Tensor Core GPU

    " + "documentation":"

    The name of the chip used by the GPU accelerator.

    If you specify l4 as the name of the accelerator, you must specify latest or grid:r570 as the runtime.

    The available GPU accelerators are:

    • t4 - NVIDIA T4 Tensor Core GPU

    • a10g - NVIDIA A10G Tensor Core GPU

    • l4 - NVIDIA L4 Tensor Core GPU

    • l40s - NVIDIA L40S Tensor Core GPU

    " }, "runtime":{ "shape":"AcceleratorRuntime", - "documentation":"

    Specifies the runtime driver to use for the GPU accelerator. You must use the same runtime for all GPUs.

    You can choose from the following runtimes:

    • latest - Use the latest runtime available for the chip. If you specify latest and a new version of the runtime is released, the new version of the runtime is used.

    • grid:r550 - NVIDIA vGPU software 17

    • grid:r535 - NVIDIA vGPU software 16

    If you don't specify a runtime, Deadline Cloud uses latest as the default. However, if you have multiple accelerators and specify latest for some and leave others blank, Deadline Cloud raises an exception.

    " + "documentation":"

    Specifies the runtime driver to use for the GPU accelerator. You must use the same runtime for all GPUs.

    You can choose from the following runtimes:

    • latest - Use the latest runtime available for the chip. If you specify latest and a new version of the runtime is released, the new version of the runtime is used.

    • grid:r570 - NVIDIA vGPU software 18

    • grid:r535 - NVIDIA vGPU software 16

    If you don't specify a runtime, Deadline Cloud uses latest as the default. However, if you have multiple accelerators and specify latest for some and leave others blank, Deadline Cloud raises an exception.

    " } }, "documentation":"

    Describes a specific GPU accelerator required for an Amazon Elastic Compute Cloud worker host.

    " @@ -3465,7 +3465,7 @@ }, "maxWorkerCount":{ "shape":"MinZeroMaxInteger", - "documentation":"

    The maximum number of workers for the fleet.

    " + "documentation":"

    The maximum number of workers for the fleet.

    Deadline Cloud limits the number of workers to less than or equal to the fleet's maximum worker count. The service maintains eventual consistency for the worker count. If you make multiple rapid calls to CreateWorker before the field updates, you might exceed your fleet's maximum worker count. For example, if your maxWorkerCount is 10 and you currently have 9 workers, making two quick CreateWorker calls might successfully create 2 workers instead of 1, resulting in 11 total workers.

    " }, "configuration":{ "shape":"FleetConfiguration", @@ -3474,6 +3474,10 @@ "tags":{ "shape":"Tags", "documentation":"

    Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings.

    " + }, + "hostConfiguration":{ + "shape":"HostConfiguration", + "documentation":"

    Provides a script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet.

    " } } }, @@ -5454,7 +5458,7 @@ }, "status":{ "shape":"FleetStatus", - "documentation":"

    The Auto Scaling status of the fleet.

    " + "documentation":"

    The status of the fleet.

    " }, "autoScalingStatus":{ "shape":"AutoScalingStatus", @@ -5480,6 +5484,10 @@ "shape":"FleetConfiguration", "documentation":"

    The configuration setting for the fleet.

    " }, + "hostConfiguration":{ + "shape":"HostConfiguration", + "documentation":"

    The script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet.

    " + }, "capabilities":{ "shape":"FleetCapabilities", "documentation":"

    Outlines what the fleet is capable of for minimums, maximums, and naming, in addition to attribute names and values.

    " @@ -5625,6 +5633,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the job failed and were retried.

    " + }, "storageProfileId":{ "shape":"StorageProfileId", "documentation":"

    The storage profile ID associated with the job.

    " @@ -6498,6 +6510,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the step failed and were retried.

    " + }, "targetTaskRunStatus":{ "shape":"StepTargetTaskRunStatus", "documentation":"

    The task status with which the job started.

    " @@ -6845,6 +6861,33 @@ } } }, + "HostConfiguration":{ + "type":"structure", + "required":["scriptBody"], + "members":{ + "scriptBody":{ + "shape":"HostConfigurationScript", + "documentation":"

    The text of the script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet. The script runs after a worker enters the STARTING state and before the worker processes tasks.

    For more information about using the script, see Run scripts as an administrator to configure workers in the Deadline Cloud Developer Guide.

    The script runs as an administrative user (sudo root on Linux, as an Administrator on Windows).

    " + }, + "scriptTimeoutSeconds":{ + "shape":"HostConfigurationScriptTimeoutSeconds", + "documentation":"

    The maximum time that the host configuration can run. If the timeout expires, the worker enters the NOT RESPONDING state and shuts down. You are charged for the time that the worker is running the host configuration script.

    You should configure your fleet for a maximum of one worker while testing your host configuration script to avoid starting additional workers.

    The default is 300 seconds (5 minutes).

    " + } + }, + "documentation":"

    Provides a script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet.

    To remove a script from a fleet, use the UpdateFleet operation with the hostConfiguration scriptBody parameter set to an empty string (\"\").

    " + }, + "HostConfigurationScript":{ + "type":"string", + "max":15000, + "min":0, + "sensitive":true + }, + "HostConfigurationScriptTimeoutSeconds":{ + "type":"integer", + "box":true, + "max":3600, + "min":300 + }, "HostName":{ "type":"string", "pattern":"[a-zA-Z0-9_\\.\\-]{0,255}" @@ -7375,6 +7418,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the job failed and were retried.

    " + }, "priority":{ "shape":"JobPriority", "documentation":"

    The job priority.

    " @@ -7490,6 +7537,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the job failed and were retried.

    " + }, "maxFailedTasksCount":{ "shape":"MaxFailedTasksCount", "documentation":"

    The number of task failures before the job stops running and is marked as FAILED.

    " @@ -9940,7 +9991,7 @@ "documentation":"

    The operators to include in the search.

    " } }, - "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource.

    You can use two groupings per search each within parenthesis ().

    " + "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource. You can use two groupings per search each within parenthesis ().

    " }, "SearchJobsRequest":{ "type":"structure", @@ -9962,7 +10013,7 @@ }, "filterExpressions":{ "shape":"SearchGroupedFilterExpressions", - "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource.

    You can use two groupings per search each within parenthesis ().

    " + "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource. You can use two groupings per search each within parenthesis ().

    " }, "sortExpressions":{ "shape":"SearchSortExpressions", @@ -10066,7 +10117,7 @@ }, "filterExpressions":{ "shape":"SearchGroupedFilterExpressions", - "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource.

    You can use two groupings per search each within parenthesis ().

    " + "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource. You can use two groupings per search each within parenthesis ().

    " }, "sortExpressions":{ "shape":"SearchSortExpressions", @@ -10145,7 +10196,7 @@ }, "filterExpressions":{ "shape":"SearchGroupedFilterExpressions", - "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource.

    You can use two groupings per search each within parenthesis ().

    " + "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource. You can use two groupings per search each within parenthesis ().

    " }, "sortExpressions":{ "shape":"SearchSortExpressions", @@ -10247,7 +10298,7 @@ }, "filterExpressions":{ "shape":"SearchGroupedFilterExpressions", - "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource.

    You can use two groupings per search each within parenthesis ().

    " + "documentation":"

    The filter expression, AND or OR, to use when searching among a group of search strings in a resource. You can use two groupings per search each within parenthesis ().

    " }, "sortExpressions":{ "shape":"SearchSortExpressions", @@ -10324,6 +10375,10 @@ "instanceMarketOptions":{ "shape":"ServiceManagedEc2InstanceMarketOptions", "documentation":"

    The Amazon EC2 market type.

    " + }, + "storageProfileId":{ + "shape":"StorageProfileId", + "documentation":"

    The storage profile ID.

    " } }, "documentation":"

    The configuration details for a service managed Amazon EC2 fleet.

    " @@ -10445,7 +10500,8 @@ "type":"string", "enum":[ "SERVICE_QUOTA_EXCEEDED_EXCEPTION", - "KMS_KEY_LIMIT_EXCEEDED" + "KMS_KEY_LIMIT_EXCEEDED", + "DEPENDENCY_LIMIT_EXCEEDED" ] }, "SessionActionDefinition":{ @@ -11150,6 +11206,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the step failed and were retried.

    " + }, "createdAt":{ "shape":"CreatedAt", "documentation":"

    The date and time the resource was created.

    " @@ -11209,6 +11269,10 @@ "shape":"TaskRunStatusCounts", "documentation":"

    The number of tasks running on the job.

    " }, + "taskFailureRetryCount":{ + "shape":"TaskFailureRetryCount", + "documentation":"

    The total number of times tasks from the step failed and were retried.

    " + }, "targetTaskRunStatus":{ "shape":"StepTargetTaskRunStatus", "documentation":"

    The task status to start with on the job.

    " @@ -11394,6 +11458,12 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "TaskFailureRetryCount":{ + "type":"integer", + "box":true, + "max":2147483647, + "min":0 + }, "TaskId":{ "type":"string", "pattern":"task-[0-9a-f]{32}-(0|([1-9][0-9]{0,9}))" @@ -11828,11 +11898,15 @@ }, "maxWorkerCount":{ "shape":"MinZeroMaxInteger", - "documentation":"

    The maximum number of workers in the fleet.

    " + "documentation":"

    The maximum number of workers in the fleet.

    Deadline Cloud limits the number of workers to less than or equal to the fleet's maximum worker count. The service maintains eventual consistency for the worker count. If you make multiple rapid calls to CreateWorker before the field updates, you might exceed your fleet's maximum worker count. For example, if your maxWorkerCount is 10 and you currently have 9 workers, making two quick CreateWorker calls might successfully create 2 workers instead of 1, resulting in 11 total workers.

    " }, "configuration":{ "shape":"FleetConfiguration", "documentation":"

    The fleet configuration to update.

    " + }, + "hostConfiguration":{ + "shape":"HostConfiguration", + "documentation":"

    Provides a script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet.

    " } } }, @@ -12451,6 +12525,10 @@ "log":{ "shape":"LogConfiguration", "documentation":"

    The worker log to update.

    " + }, + "hostConfiguration":{ + "shape":"HostConfiguration", + "documentation":"

    The script that runs as a worker is starting up that you can use to provide additional configuration for workers in your fleet.

    " } } }, diff --git a/services/detective/pom.xml b/services/detective/pom.xml index 4de2345f6c26..c1770b7bfecb 100644 --- a/services/detective/pom.xml +++ b/services/detective/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT detective AWS Java SDK :: Services :: Detective diff --git a/services/detective/src/main/resources/codegen-resources/customization.config b/services/detective/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/detective/src/main/resources/codegen-resources/customization.config +++ b/services/detective/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/devicefarm/pom.xml b/services/devicefarm/pom.xml index 63fb744c3eae..b44c44f8d641 100644 --- a/services/devicefarm/pom.xml +++ b/services/devicefarm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT devicefarm AWS Java SDK :: Services :: AWS Device Farm diff --git a/services/devicefarm/src/main/resources/codegen-resources/customization.config b/services/devicefarm/src/main/resources/codegen-resources/customization.config index d3bad611c8a5..34218832ad4b 100644 --- a/services/devicefarm/src/main/resources/codegen-resources/customization.config +++ b/services/devicefarm/src/main/resources/codegen-resources/customization.config @@ -15,6 +15,5 @@ "renewOffering", "listVPCEConfigurations" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/devopsguru/pom.xml b/services/devopsguru/pom.xml index 4ed957efacc0..77bc4575f9a1 100644 --- a/services/devopsguru/pom.xml +++ b/services/devopsguru/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 
2.31.76-SNAPSHOT devopsguru AWS Java SDK :: Services :: Dev Ops Guru diff --git a/services/devopsguru/src/main/resources/codegen-resources/customization.config b/services/devopsguru/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/devopsguru/src/main/resources/codegen-resources/customization.config +++ b/services/devopsguru/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/directconnect/pom.xml b/services/directconnect/pom.xml index 8be80babdc68..c634ff2f760e 100644 --- a/services/directconnect/pom.xml +++ b/services/directconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT directconnect AWS Java SDK :: Services :: AWS Direct Connect diff --git a/services/directconnect/src/main/resources/codegen-resources/customization.config b/services/directconnect/src/main/resources/codegen-resources/customization.config index d51f17191753..747a898ef2d7 100644 --- a/services/directconnect/src/main/resources/codegen-resources/customization.config +++ b/services/directconnect/src/main/resources/codegen-resources/customization.config @@ -20,6 +20,5 @@ "DescribeConnectionsOnInterconnect", "DescribeInterconnectLoa" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/directory/pom.xml b/services/directory/pom.xml index afe74be49146..788476236893 100644 --- a/services/directory/pom.xml +++ b/services/directory/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT directory AWS Java SDK :: Services :: AWS Directory Service diff --git a/services/directory/src/main/resources/codegen-resources/customization.config 
b/services/directory/src/main/resources/codegen-resources/customization.config index b28b4739170b..4136b1085a58 100644 --- a/services/directory/src/main/resources/codegen-resources/customization.config +++ b/services/directory/src/main/resources/codegen-resources/customization.config @@ -216,6 +216,5 @@ ] } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/directoryservicedata/pom.xml b/services/directoryservicedata/pom.xml index f77a77229977..479c10c67fff 100644 --- a/services/directoryservicedata/pom.xml +++ b/services/directoryservicedata/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT directoryservicedata AWS Java SDK :: Services :: Directory Service Data diff --git a/services/directoryservicedata/src/main/resources/codegen-resources/customization.config b/services/directoryservicedata/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/directoryservicedata/src/main/resources/codegen-resources/customization.config +++ b/services/directoryservicedata/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/dlm/pom.xml b/services/dlm/pom.xml index 146d7362855f..4daadee90bd2 100644 --- a/services/dlm/pom.xml +++ b/services/dlm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dlm AWS Java SDK :: Services :: DLM diff --git a/services/dlm/src/main/resources/codegen-resources/customization.config b/services/dlm/src/main/resources/codegen-resources/customization.config index 72eff5158fd7..108e3431d2ec 100644 --- a/services/dlm/src/main/resources/codegen-resources/customization.config +++ b/services/dlm/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "getLifecyclePolicies" ], - 
"enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/docdb/pom.xml b/services/docdb/pom.xml index e6994a73249f..582fd4700931 100644 --- a/services/docdb/pom.xml +++ b/services/docdb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT docdb AWS Java SDK :: Services :: DocDB diff --git a/services/docdbelastic/pom.xml b/services/docdbelastic/pom.xml index 3208d31b6db9..a3e5feab1a53 100644 --- a/services/docdbelastic/pom.xml +++ b/services/docdbelastic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT docdbelastic AWS Java SDK :: Services :: Doc DB Elastic diff --git a/services/docdbelastic/src/main/resources/codegen-resources/customization.config b/services/docdbelastic/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/docdbelastic/src/main/resources/codegen-resources/customization.config +++ b/services/docdbelastic/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/drs/pom.xml b/services/drs/pom.xml index 63f464c85475..775aa70a5cc9 100644 --- a/services/drs/pom.xml +++ b/services/drs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT drs AWS Java SDK :: Services :: Drs diff --git a/services/drs/src/main/resources/codegen-resources/customization.config b/services/drs/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/drs/src/main/resources/codegen-resources/customization.config +++ b/services/drs/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": 
true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/dsql/pom.xml b/services/dsql/pom.xml index f17559a17951..cd65528ef300 100644 --- a/services/dsql/pom.xml +++ b/services/dsql/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dsql AWS Java SDK :: Services :: DSQL diff --git a/services/dsql/src/main/resources/codegen-resources/customization.config b/services/dsql/src/main/resources/codegen-resources/customization.config index 0b399df425ac..a63c69ef3dd5 100644 --- a/services/dsql/src/main/resources/codegen-resources/customization.config +++ b/services/dsql/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "createMethodParams": [ "clientConfiguration" ] - }, - "enableFastUnmarshaller": true + } } diff --git a/services/dsql/src/main/resources/codegen-resources/service-2.json b/services/dsql/src/main/resources/codegen-resources/service-2.json index 0f0aeed47c38..6ba7b5378c8d 100644 --- a/services/dsql/src/main/resources/codegen-resources/service-2.json +++ b/services/dsql/src/main/resources/codegen-resources/service-2.json @@ -25,31 +25,12 @@ "errors":[ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, - {"shape":"InternalServerException"}, - {"shape":"ConflictException"} - ], - "documentation":"

    Creates a cluster in Amazon Aurora DSQL.

    " - }, - "CreateMultiRegionClusters":{ - "name":"CreateMultiRegionClusters", - "http":{ - "method":"POST", - "requestUri":"/multi-region-clusters", - "responseCode":200 - }, - "input":{"shape":"CreateMultiRegionClustersInput"}, - "output":{"shape":"CreateMultiRegionClustersOutput"}, - "errors":[ - {"shape":"ServiceQuotaExceededException"}, - {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

    Creates multi-Region clusters in Amazon Aurora DSQL. Multi-Region clusters require a linked Region list, which is an array of the Regions in which you want to create linked clusters. Multi-Region clusters require a witness Region, which participates in quorum in failure scenarios.

    " + "documentation":"

    The CreateCluster API allows you to create both single-region clusters and multi-Region clusters. With the addition of the multiRegionProperties parameter, you can create a cluster with witness Region support and establish peer relationships with clusters in other Regions during creation.

    Creating multi-Region clusters requires additional IAM permissions beyond those needed for single-Region clusters, as detailed in the Required permissions section below.

    Required permissions

    dsql:CreateCluster

    Required to create a cluster.

    Resources: arn:aws:dsql:region:account-id:cluster/*

    dsql:TagResource

    Permission to add tags to a resource.

    Resources: arn:aws:dsql:region:account-id:cluster/*

    dsql:PutMultiRegionProperties

    Permission to configure multi-region properties for a cluster.

    Resources: arn:aws:dsql:region:account-id:cluster/*

    dsql:AddPeerCluster

    When specifying multiRegionProperties.clusters, permission to add peer clusters.

    Resources:

    • Local cluster: arn:aws:dsql:region:account-id:cluster/*

    • Each peer cluster: exact ARN of each specified peer cluster

    dsql:PutWitnessRegion

    When specifying multiRegionProperties.witnessRegion, permission to set a witness Region. This permission is checked both in the cluster Region and in the witness Region.

    Resources: arn:aws:dsql:region:account-id:cluster/*

    Condition Keys: dsql:WitnessRegion (matching the specified witness region)

    • The witness Region specified in multiRegionProperties.witnessRegion cannot be the same as the cluster's Region.

    " }, "DeleteCluster":{ "name":"DeleteCluster", @@ -71,25 +52,6 @@ "documentation":"

    Deletes a cluster in Amazon Aurora DSQL.

    ", "idempotent":true }, - "DeleteMultiRegionClusters":{ - "name":"DeleteMultiRegionClusters", - "http":{ - "method":"DELETE", - "requestUri":"/multi-region-clusters", - "responseCode":200 - }, - "input":{"shape":"DeleteMultiRegionClustersInput"}, - "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"} - ], - "documentation":"

    Deletes a multi-Region cluster in Amazon Aurora DSQL.

    ", - "idempotent":true - }, "GetCluster":{ "name":"GetCluster", "http":{ @@ -210,13 +172,13 @@ "output":{"shape":"UpdateClusterOutput"}, "errors":[ {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

    Updates a cluster.

    " + "documentation":"

    The UpdateCluster API allows you to modify both single-Region and multi-Region cluster configurations. With the multiRegionProperties parameter, you can add or modify witness Region support and manage peer relationships with clusters in other Regions.

    Note that updating multi-region clusters requires additional IAM permissions beyond those needed for standard cluster updates, as detailed in the Permissions section.

    Required permissions

    dsql:UpdateCluster

    Permission to update a DSQL cluster.

    Resources: arn:aws:dsql:region:account-id:cluster/cluster-id

    dsql:PutMultiRegionProperties

    Permission to configure multi-Region properties for a cluster.

    Resources: arn:aws:dsql:region:account-id:cluster/cluster-id

    dsql:GetCluster

    Permission to retrieve cluster information.

    Resources: arn:aws:dsql:region:account-id:cluster/cluster-id

    dsql:AddPeerCluster

    Permission to add peer clusters.

    Resources:

    • Local cluster: arn:aws:dsql:region:account-id:cluster/cluster-id

    • Each peer cluster: exact ARN of each specified peer cluster

    dsql:RemovePeerCluster

    Permission to remove peer clusters. The dsql:RemovePeerCluster permission uses a wildcard ARN pattern to simplify permission management during updates.

    Resources: arn:aws:dsql:*:account-id:cluster/*

    dsql:PutWitnessRegion

    Permission to set a witness Region.

    Resources: arn:aws:dsql:region:account-id:cluster/cluster-id

    Condition Keys: dsql:WitnessRegion (matching the specified witness Region)

    This permission is checked both in the cluster Region and in the witness Region.

    • The witness region specified in multiRegionProperties.witnessRegion cannot be the same as the cluster's Region.

    • When updating clusters with peer relationships, permissions are checked for both adding and removing peers.

    • The dsql:RemovePeerCluster permission uses a wildcard ARN pattern to simplify permission management during updates.

    " } }, "shapes":{ @@ -235,57 +197,56 @@ }, "Arn":{ "type":"string", - "documentation":"

    Amazon Resource Name

    ", + "documentation":"

    Amazon Resource Name.

    ", "max":1011, "min":1, "pattern":"arn:.+" }, "ClientToken":{ "type":"string", - "documentation":"

    Idempotency Token

    ", + "documentation":"

    Idempotency token so a request is only processed once.

    ", "max":128, "min":1, "pattern":"[!-~]+" }, "ClusterArn":{ "type":"string", - "documentation":"

    Cluster ARN

    " + "documentation":"

    The Amazon Resource Name of the cluster.

    ", + "pattern":"arn:aws(-[^:]+)?:dsql:[a-z0-9-]{1,20}:[0-9]{12}:cluster/[a-z0-9]{26}" }, "ClusterArnList":{ "type":"list", "member":{"shape":"ClusterArn"}, - "documentation":"

    List of cluster arns

    " + "documentation":"

    A list of the Amazon Resource Names of the cluster.

    " }, "ClusterCreationTime":{ "type":"timestamp", - "documentation":"

    Timestamp when the Cluster was created

    " + "documentation":"

    The timestamp when the cluster was created.

    " }, "ClusterId":{ "type":"string", - "documentation":"

    The ID of the cluster

    ", + "documentation":"

    The ID of the cluster.

    ", "pattern":"[a-z0-9]{26}" }, "ClusterList":{ "type":"list", "member":{"shape":"ClusterSummary"}, - "documentation":"

    List of clusters

    " - }, - "ClusterPropertyMap":{ - "type":"map", - "key":{"shape":"Region"}, - "value":{"shape":"LinkedClusterProperties"}, - "documentation":"

    Properties for each linked cluster

    " + "documentation":"

    The list of clusters.

    " }, "ClusterStatus":{ "type":"string", - "documentation":"

    Cluster Status

    ", + "documentation":"

    The current status of a cluster.

    ", "enum":[ "CREATING", "ACTIVE", + "IDLE", + "INACTIVE", "UPDATING", "DELETING", "DELETED", - "FAILED" + "FAILED", + "PENDING_SETUP", + "PENDING_DELETE" ] }, "ClusterSummary":{ @@ -334,6 +295,10 @@ "shape":"DeletionProtectionEnabled", "documentation":"

    If enabled, you can't delete your cluster. You must first disable this property before you can delete your cluster.

    " }, + "kmsEncryptionKey":{ + "shape":"KmsEncryptionKey", + "documentation":"

    The KMS key that encrypts and protects the data on your cluster. You can specify the ARN, ID, or alias of an existing key or have Amazon Web Services create a default key for you.

    " + }, "tags":{ "shape":"TagMap", "documentation":"

    A map of key and value pairs to use to tag your cluster.

    " @@ -342,6 +307,10 @@ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect.

    If you don't specify a client token, the Amazon Web Services SDK automatically generates one.

    ", "idempotencyToken":true + }, + "multiRegionProperties":{ + "shape":"MultiRegionProperties", + "documentation":"

    The configuration settings when creating a multi-Region cluster, including the witness region and linked cluster properties.

    " } } }, @@ -371,48 +340,20 @@ "shape":"ClusterCreationTime", "documentation":"

    The time of when created the cluster.

    " }, + "multiRegionProperties":{ + "shape":"MultiRegionProperties", + "documentation":"

    The multi-Region cluster configuration details that were set during cluster creation.

    " + }, + "encryptionDetails":{ + "shape":"EncryptionDetails", + "documentation":"

    The encryption configuration for the cluster that was specified during the creation process, including the KMS key identifier and encryption state.

    " + }, "deletionProtectionEnabled":{ "shape":"DeletionProtectionEnabled", "documentation":"

    Whether deletion protection is enabled on this cluster.

    " } }, - "documentation":"

    Output Mixin

    " - }, - "CreateMultiRegionClustersInput":{ - "type":"structure", - "required":[ - "linkedRegionList", - "witnessRegion" - ], - "members":{ - "linkedRegionList":{ - "shape":"RegionList", - "documentation":"

    An array of the Regions in which you want to create additional clusters.

    " - }, - "clusterProperties":{ - "shape":"ClusterPropertyMap", - "documentation":"

    A mapping of properties to use when creating linked clusters.

    " - }, - "witnessRegion":{ - "shape":"Region", - "documentation":"

    The witness Region of multi-Region clusters.

    " - }, - "clientToken":{ - "shape":"ClientToken", - "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully. The subsequent retries with the same client token return the result from the original successful request and they have no additional effect.

    If you don't specify a client token, the Amazon Web Services SDK automatically generates one.

    ", - "idempotencyToken":true - } - } - }, - "CreateMultiRegionClustersOutput":{ - "type":"structure", - "required":["linkedClusterArns"], - "members":{ - "linkedClusterArns":{ - "shape":"ClusterArnList", - "documentation":"

    An array that contains the ARNs of all linked clusters.

    " - } - } + "documentation":"

    The output of a created cluster.

    " }, "DeleteClusterInput":{ "type":"structure", @@ -439,8 +380,7 @@ "identifier", "arn", "status", - "creationTime", - "deletionProtectionEnabled" + "creationTime" ], "members":{ "identifier":{ @@ -458,37 +398,52 @@ "creationTime":{ "shape":"ClusterCreationTime", "documentation":"

    The time of when the cluster was created.

    " - }, - "deletionProtectionEnabled":{ - "shape":"DeletionProtectionEnabled", - "documentation":"

    Specifies whether deletion protection was enabled on the cluster.

    " } }, - "documentation":"

    Output Mixin

    " + "documentation":"

    The output from a deleted cluster.

    " + }, + "DeletionProtectionEnabled":{ + "type":"boolean", + "documentation":"

    Indicates whether deletion protection is enabled for a cluster.

    ", + "box":true }, - "DeleteMultiRegionClustersInput":{ + "EncryptionDetails":{ "type":"structure", - "required":["linkedClusterArns"], + "required":[ + "encryptionType", + "encryptionStatus" + ], "members":{ - "linkedClusterArns":{ - "shape":"ClusterArnList", - "documentation":"

    The ARNs of the clusters linked to the cluster you want to delete. also deletes these clusters as part of the operation.

    ", - "location":"querystring", - "locationName":"linked-cluster-arns" + "encryptionType":{ + "shape":"EncryptionType", + "documentation":"

    The type of encryption that protects the data on your cluster.

    " }, - "clientToken":{ - "shape":"ClientToken", - "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully. The subsequent retries with the same client token return the result from the original successful request and they have no additional effect.

    If you don't specify a client token, the Amazon Web Services SDK automatically generates one.

    ", - "idempotencyToken":true, - "location":"querystring", - "locationName":"client-token" + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

    The ARN of the KMS key that encrypts data in the cluster.

    " + }, + "encryptionStatus":{ + "shape":"EncryptionStatus", + "documentation":"

    The status of encryption for the cluster.

    " } - } + }, + "documentation":"

    Configuration details about encryption for the cluster including the KMS key ARN, encryption type, and encryption status.

    " }, - "DeletionProtectionEnabled":{ - "type":"boolean", - "documentation":"

    Deletion Protection

    ", - "box":true + "EncryptionStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "UPDATING", + "KMS_KEY_INACCESSIBLE", + "ENABLING" + ] + }, + "EncryptionType":{ + "type":"string", + "enum":[ + "AWS_OWNED_KMS_KEY", + "CUSTOMER_MANAGED_KMS_KEY" + ] }, "GetClusterInput":{ "type":"structure", @@ -532,16 +487,17 @@ "shape":"DeletionProtectionEnabled", "documentation":"

    Whether deletion protection is enabled in this cluster.

    " }, - "witnessRegion":{ - "shape":"Region", - "documentation":"

    The witness Region of the cluster. Applicable only for multi-Region clusters.

    " + "multiRegionProperties":{ + "shape":"MultiRegionProperties", + "documentation":"

    Returns the current multi-Region cluster configuration, including witness region and linked cluster information.

    " }, - "linkedClusterArns":{ - "shape":"ClusterArnList", - "documentation":"

    The ARNs of the clusters linked to the retrieved cluster.

    " + "tags":{"shape":"TagMap"}, + "encryptionDetails":{ + "shape":"EncryptionDetails", + "documentation":"

    The current encryption configuration details for the cluster.

    " } }, - "documentation":"

    Output Mixin

    " + "documentation":"

    The output of a cluster.

    " }, "GetVpcEndpointServiceNameInput":{ "type":"structure", @@ -587,20 +543,13 @@ "fault":true, "retryable":{"throttling":false} }, - "LinkedClusterProperties":{ - "type":"structure", - "members":{ - "deletionProtectionEnabled":{ - "shape":"DeletionProtectionEnabled", - "documentation":"

    Whether deletion protection is enabled.

    " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

    A map of key and value pairs the linked cluster is tagged with.

    " - } - }, - "documentation":"

    Properties of linked clusters.

    " + "KmsEncryptionKey":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[a-zA-Z0-9:/_-]+" }, + "KmsKeyArn":{"type":"string"}, "ListClustersInput":{ "type":"structure", "members":{ @@ -655,26 +604,35 @@ }, "MaxResults":{ "type":"integer", - "documentation":"

    Max results that will be returned per page

    ", + "documentation":"

    Max results that will be returned per page.

    ", "box":true, "max":100, "min":1 }, + "MultiRegionProperties":{ + "type":"structure", + "members":{ + "witnessRegion":{ + "shape":"Region", + "documentation":"

    The Region that serves as the witness region for a multi-Region cluster. The witness region helps maintain cluster consistency and quorum.

    " + }, + "clusters":{ + "shape":"ClusterArnList", + "documentation":"

    The set of linked clusters that form the multi-Region cluster configuration. Each linked cluster represents a database instance in a different Region.

    " + } + }, + "documentation":"

    Defines the structure for multi-Region cluster configurations, containing the witness region and linked cluster settings.

    " + }, "NextToken":{ "type":"string", - "documentation":"

    Opaque token used to retrieve next page

    " + "documentation":"

    Token used to retrieve next page.

    " }, "Region":{ "type":"string", - "documentation":"

    AWS Region name (e.g.: 'us-east-1')

    ", - "max":20, + "documentation":"

    Region name.

    ", + "max":50, "min":0 }, - "RegionList":{ - "type":"list", - "member":{"shape":"Region"}, - "documentation":"

    List of regions

    " - }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -686,11 +644,11 @@ "message":{"shape":"String"}, "resourceId":{ "shape":"String", - "documentation":"

    Hypothetical identifier of the resource which does not exist

    " + "documentation":"

    The resource ID could not be found.

    " }, "resourceType":{ "shape":"String", - "documentation":"

    Hypothetical type of the resource which does not exist

    " + "documentation":"

    The resource type could not be found.

    " } }, "documentation":"

    The resource could not be found.

    ", @@ -702,7 +660,7 @@ }, "ServiceName":{ "type":"string", - "documentation":"

    VPC Endpoint Service name for a Cluster

    ", + "documentation":"

    The name of the VPC endpoint service that provides access to your cluster. Use this endpoint to establish a private connection between your VPC and the cluster.

    ", "max":128, "min":1, "pattern":"com\\.amazonaws\\.[a-z0-9-]+\\.dsql-[a-f0-9]{6}" @@ -719,23 +677,23 @@ "members":{ "message":{ "shape":"String", - "documentation":"

    Description of the error

    " + "documentation":"

    The service exception for exceeding a quota.

    " }, "resourceId":{ "shape":"String", - "documentation":"

    Identifier of the resource affected

    " + "documentation":"

    The resource ID exceeds a quota.

    " }, "resourceType":{ "shape":"String", - "documentation":"

    Type of the resource affected

    " + "documentation":"

    The resource type exceeds a quota.

    " }, "serviceCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating service

    " + "documentation":"

    The request exceeds a service quota.

    " }, "quotaCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating quota

    " + "documentation":"

    The service exceeds a quota.

    " } }, "documentation":"

    The service limit was exceeded.

    ", @@ -748,7 +706,7 @@ "String":{"type":"string"}, "TagKey":{ "type":"string", - "documentation":"

    Unique tag key, maximum 128 Unicode characters in UTF-8

    ", + "documentation":"

    Unique tag key, maximum 128 Unicode characters in UTF-8.

    ", "max":128, "min":1, "pattern":"[a-zA-Z0-9_.:/=+\\-@ ]*" @@ -756,7 +714,7 @@ "TagKeyList":{ "type":"list", "member":{"shape":"TagKey"}, - "documentation":"

    List of tag keys

    ", + "documentation":"

    List of tag keys.

    ", "max":200, "min":0 }, @@ -764,7 +722,7 @@ "type":"map", "key":{"shape":"TagKey"}, "value":{"shape":"TagValue"}, - "documentation":"

    Map of tags

    ", + "documentation":"

    Map of tags.

    ", "max":200, "min":0 }, @@ -789,7 +747,7 @@ }, "TagValue":{ "type":"string", - "documentation":"

    Tag value, maximum 256 Unicode characters in UTF-8

    ", + "documentation":"

    Tag value, maximum 256 Unicode characters in UTF-8.

    ", "max":256, "min":0, "pattern":"[a-zA-Z0-9_.:/=+\\-@ ]*" @@ -800,19 +758,19 @@ "members":{ "message":{ "shape":"String", - "documentation":"

    Description of the error

    " + "documentation":"

    The message that the request was denied due to request throttling.

    " }, "serviceCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating service

    " + "documentation":"

    The request exceeds a service quota.

    " }, "quotaCode":{ "shape":"String", - "documentation":"

    Service Quotas requirement to identify originating quota

    " + "documentation":"

    The request exceeds a request rate quota.

    " }, "retryAfterSeconds":{ "shape":"Integer", - "documentation":"

    Advice to clients on when the call can be safely retried

    ", + "documentation":"

    The request exceeds a request rate quota. Retry the request after the number of seconds indicated in the Retry-After header.

    ", "location":"header", "locationName":"Retry-After" } @@ -860,10 +818,18 @@ "shape":"DeletionProtectionEnabled", "documentation":"

    Specifies whether to enable deletion protection in your cluster.

    " }, + "kmsEncryptionKey":{ + "shape":"KmsEncryptionKey", + "documentation":"

    The KMS key that encrypts and protects the data on your cluster. You can specify the ARN, ID, or alias of an existing key or have Amazon Web Services create a default key for you.

    " + }, "clientToken":{ "shape":"ClientToken", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, the subsequent retries with the same client token return the result from the original successful request and they have no additional effect.

    If you don't specify a client token, the Amazon Web Services SDK automatically generates one.

    ", "idempotencyToken":true + }, + "multiRegionProperties":{ + "shape":"MultiRegionProperties", + "documentation":"

    The new multi-Region cluster configuration settings to be applied during an update operation.

    " } } }, @@ -873,8 +839,7 @@ "identifier", "arn", "status", - "creationTime", - "deletionProtectionEnabled" + "creationTime" ], "members":{ "identifier":{ @@ -892,21 +857,9 @@ "creationTime":{ "shape":"ClusterCreationTime", "documentation":"

    The time of when the cluster was created.

    " - }, - "deletionProtectionEnabled":{ - "shape":"DeletionProtectionEnabled", - "documentation":"

    Whether deletion protection is enabled for the updated cluster.

    " - }, - "witnessRegion":{ - "shape":"Region", - "documentation":"

    The Region that receives all data you write to linked clusters.

    " - }, - "linkedClusterArns":{ - "shape":"ClusterArnList", - "documentation":"

    The ARNs of the clusters linked to the updated cluster. Applicable only for multi-Region clusters.

    " } }, - "documentation":"

    Output Mixin

    " + "documentation":"

    The details of the cluster after it has been updated.

    " }, "ValidationException":{ "type":"structure", @@ -916,8 +869,14 @@ ], "members":{ "message":{"shape":"String"}, - "reason":{"shape":"ValidationExceptionReason"}, - "fieldList":{"shape":"ValidationExceptionFieldList"} + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the validation exception.

    " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    A list of fields that didn't validate.

    " + } }, "documentation":"

    The input failed to satisfy the constraints specified by an Amazon Web Services service.

    ", "error":{ @@ -947,11 +906,11 @@ "ValidationExceptionFieldList":{ "type":"list", "member":{"shape":"ValidationExceptionField"}, - "documentation":"

    List of fields that caused the error

    " + "documentation":"

    A list of fields that didn't validate.

    " }, "ValidationExceptionReason":{ "type":"string", - "documentation":"

    Reason the request failed validation

    ", + "documentation":"

    The reason for the validation exception.

    ", "enum":[ "unknownOperation", "cannotParse", @@ -961,5 +920,5 @@ ] } }, - "documentation":"

    This is an interface reference for Amazon Aurora DSQL. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Aurora DSQL.

    Amazon Aurora DSQL is a serverless, distributed SQL database suitable for workloads of any size. Aurora DSQL is available in both single-Region and multi-Region configurations, so your clusters and databases are always available even if an Availability Zone or an Amazon Web Services Region are unavailable. Aurora DSQL lets you focus on using your data to acquire new insights for your business and customers.

    " + "documentation":"

    This is an interface reference for Amazon Aurora DSQL. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Aurora DSQL.

    Amazon Aurora DSQL is a serverless, distributed SQL database suitable for workloads of any size. Aurora DSQL is available in both single-Region and multi-Region configurations, so your clusters and databases are always available even if an Availability Zone or an Amazon Web Services Region are unavailable. Aurora DSQL lets you focus on using your data to acquire new insights for your business and customers.

    " } diff --git a/services/dynamodb/pom.xml b/services/dynamodb/pom.xml index 0922a72f923f..673011fd1b12 100644 --- a/services/dynamodb/pom.xml +++ b/services/dynamodb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT dynamodb AWS Java SDK :: Services :: Amazon DynamoDB diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config index a7b525068c07..b777570861ea 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/customization.config @@ -36,5 +36,6 @@ "customRetryStrategy" : "software.amazon.awssdk.services.dynamodb.DynamoDbRetryPolicy", "enableEndpointDiscoveryMethodRequired": true, "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": false + "enableFastUnmarshaller": false, + "enableEndpointProviderUriCaching": true } diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-rule-set.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-rule-set.json index fda1bf85a08c..b5b8297d316a 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-rule-set.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-rule-set.json @@ -181,8 +181,8 @@ "authSchemes": [ { "signingRegion": "us-east-1", - "name": "sigv4", - "signingName": "dynamodb" + "signingName": "dynamodb", + "name": "sigv4" } ] }, diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-tests.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-tests.json index 6c38cd611222..bf08719fc3a7 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-tests.json +++ 
b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/endpoint-tests.json @@ -2794,9 +2794,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2830,9 +2830,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2882,9 +2882,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2910,9 +2910,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2938,9 +2938,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2966,9 +2966,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -2994,9 +2994,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3022,9 +3022,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3097,9 +3097,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3135,9 +3135,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - 
"signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3173,9 +3173,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3211,9 +3211,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3280,9 +3280,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3330,9 +3330,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3357,9 +3357,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3419,9 +3419,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3455,9 +3455,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3507,9 +3507,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3535,9 +3535,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3563,9 +3563,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": 
"sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3591,9 +3591,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3619,9 +3619,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3647,9 +3647,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3722,9 +3722,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3760,9 +3760,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3798,9 +3798,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3836,9 +3836,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3905,9 +3905,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3955,9 +3955,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -3982,9 +3982,9 @@ "properties": { "authSchemes": [ { - "signingName": 
"dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4044,9 +4044,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4080,9 +4080,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4132,9 +4132,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4160,9 +4160,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4188,9 +4188,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4216,9 +4216,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4244,9 +4244,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4272,9 +4272,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4347,9 +4347,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4385,9 +4385,9 @@ "properties": { "authSchemes": [ { - 
"signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4423,9 +4423,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4461,9 +4461,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4530,9 +4530,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4580,9 +4580,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, @@ -4607,9 +4607,9 @@ "properties": { "authSchemes": [ { - "signingName": "dynamodb", "name": "sigv4", - "signingRegion": "us-east-1" + "signingRegion": "us-east-1", + "signingName": "dynamodb" } ] }, diff --git a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json index f81cb0ef7e4a..20e0657d7bd7 100644 --- a/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json +++ b/services/dynamodb/src/main/resources/codegen-resources/dynamodb/service-2.json @@ -62,7 +62,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ItemCollectionSizeLimitExceededException"}, {"shape":"RequestLimitExceeded"}, - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"ReplicatedWriteConflictException"} ], "documentation":"

    The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.

    BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action.

    The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed.

    For tables and indexes with provisioned capacity, if none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. For all tables and indexes, if none of the items can be processed due to other throttling scenarios (such as exceeding partition level limits), then BatchWriteItem returns a ThrottlingException.

    If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.

    For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.

    With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response.

    If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.

    Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.

    If one or more of the following is true, DynamoDB rejects the entire batch write operation:

    • One or more tables specified in the BatchWriteItem request does not exist.

    • Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.

    • You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request.

    • Your request contains at least two items with identical hash and range keys (which essentially is two put operations).

    • There are more than 25 requests in the batch.

    • Any individual item in a batch exceeds 400 KB.

    • The total request size exceeds 16 MB.

    • Any individual item has keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes.

    ", "endpointdiscovery":{}, @@ -192,7 +193,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

    The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

    DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus.

    When you delete a table, any indexes on that table are also deleted.

    If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

    Use the DescribeTable action to check the status of the table.

    ", + "documentation":"

    The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If the table is already in the DELETING state, no error is returned.

    DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. For the full list of table states, see TableStatus.

    When you delete a table, any indexes on that table are also deleted.

    If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours.

    Use the DescribeTable action to check the status of the table.

    ", "endpointdiscovery":{} }, "DescribeBackup":{ @@ -349,7 +350,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

    Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

    If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

    ", + "documentation":"

    Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.

    If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.

    ", "endpointdiscovery":{} }, "DescribeTableReplicaAutoScaling":{ @@ -364,7 +365,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerError"} ], - "documentation":"

    Describes auto scaling settings across replicas of the global table at once.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

    " + "documentation":"

    Describes auto scaling settings across replicas of the global table at once.

    " }, "DescribeTimeToLive":{ "name":"DescribeTimeToLive", @@ -854,7 +855,7 @@ {"shape":"ReplicaNotFoundException"}, {"shape":"TableNotFoundException"} ], - "documentation":"

    Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

    This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

    To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 you can use UpdateTable instead.

    Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

    If global secondary indexes are specified, then the following conditions must also be met:

    • The global secondary indexes must have the same name.

    • The global secondary indexes must have the same hash key and sort key (if present).

    • The global secondary indexes must have the same provisioned and maximum write capacity units.

    ", + "documentation":"

    Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.

    This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

    To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

    If you are using global tables Version 2019.11.21 (Current) you can use UpdateTable instead.

    Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.

    If global secondary indexes are specified, then the following conditions must also be met:

    • The global secondary indexes must have the same name.

    • The global secondary indexes must have the same hash key and sort key (if present).

    • The global secondary indexes must have the same provisioned and maximum write capacity units.

    ", "endpointdiscovery":{} }, "UpdateGlobalTableSettings":{ @@ -928,7 +929,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

    ", + "documentation":"

    Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

    You can only perform one of the following operations at once:

    • Modify the provisioned throughput settings of the table.

    • Remove a global secondary index from the table.

    • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.

    UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

    ", "endpointdiscovery":{} }, "UpdateTableReplicaAutoScaling":{ @@ -945,7 +946,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

    Updates auto scaling settings on your global tables at once.

    For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

    " + "documentation":"

    Updates auto scaling settings on your global tables at once.

    " }, "UpdateTimeToLive":{ "name":"UpdateTimeToLive", @@ -1989,6 +1990,17 @@ } } }, + "CreateGlobalTableWitnessGroupMemberAction":{ + "type":"structure", + "required":["RegionName"], + "members":{ + "RegionName":{ + "shape":"RegionName", + "documentation":"

    The Amazon Web Services Region name to be added as a witness Region for the MRSC global table. The witness must be in a different Region than the replicas and within the same Region set:

    • US Region set: US East (N. Virginia), US East (Ohio), US West (Oregon)

    • EU Region set: Europe (Ireland), Europe (London), Europe (Paris), Europe (Frankfurt)

    • AP Region set: Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Osaka)

    " + } + }, + "documentation":"

    Specifies the action to add a new witness Region to a MRSC global table. A MRSC global table can be configured with either three replicas, or with two replicas and one witness.

    " + }, "CreateReplicaAction":{ "type":"structure", "required":["RegionName"], @@ -2211,6 +2223,17 @@ }, "documentation":"

    Represents a global secondary index to be deleted from an existing table.

    " }, + "DeleteGlobalTableWitnessGroupMemberAction":{ + "type":"structure", + "required":["RegionName"], + "members":{ + "RegionName":{ + "shape":"RegionName", + "documentation":"

    The witness Region name to be removed from the MRSC global table.

    " + } + }, + "documentation":"

    Specifies the action to remove a witness Region from a MRSC global table. You cannot delete a single witness from a MRSC global table - you must delete both a replica and the witness together. The deletion of both a witness and replica converts the remaining replica to a single-Region DynamoDB table.

    " + }, "DeleteItemInput":{ "type":"structure", "required":[ @@ -3461,6 +3484,44 @@ "UPDATING" ] }, + "GlobalTableWitnessDescription":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"RegionName", + "documentation":"

    The name of the Amazon Web Services Region that serves as a witness for the MRSC global table.

    " + }, + "WitnessStatus":{ + "shape":"WitnessStatus", + "documentation":"

    The current status of the witness Region in the MRSC global table.

    " + } + }, + "documentation":"

    Represents the properties of a witness Region in a MRSC global table.

    " + }, + "GlobalTableWitnessDescriptionList":{ + "type":"list", + "member":{"shape":"GlobalTableWitnessDescription"} + }, + "GlobalTableWitnessGroupUpdate":{ + "type":"structure", + "members":{ + "Create":{ + "shape":"CreateGlobalTableWitnessGroupMemberAction", + "documentation":"

    Specifies a witness Region to be added to a new MRSC global table. The witness must be added when creating the MRSC global table.

    " + }, + "Delete":{ + "shape":"DeleteGlobalTableWitnessGroupMemberAction", + "documentation":"

    Specifies a witness Region to be removed from an existing global table. Must be done in conjunction with removing a replica. The deletion of both a witness and replica converts the remaining replica to a single-Region DynamoDB table.

    " + } + }, + "documentation":"

    Represents one of the following:

    • A new witness to be added to a new global table.

    • An existing witness to be removed from an existing global table.

    You can configure one witness per MRSC global table.

    " + }, + "GlobalTableWitnessGroupUpdateList":{ + "type":"list", + "member":{"shape":"GlobalTableWitnessGroupUpdate"}, + "max":1, + "min":1 + }, "IdempotentParameterMismatchException":{ "type":"structure", "members":{ @@ -5229,7 +5290,10 @@ "DELETING", "ACTIVE", "REGION_DISABLED", - "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + "ARCHIVING", + "ARCHIVED", + "REPLICATION_NOT_AUTHORIZED" ] }, "ReplicaStatusDescription":{"type":"string"}, @@ -5258,7 +5322,8 @@ "message":{"shape":"ErrorMessage"} }, "documentation":"

    The request was rejected because one or more items in the request are being modified by a request in another Region.

    ", - "exception":true + "exception":true, + "retryable":{"throttling":false} }, "ReplicationGroupUpdate":{ "type":"structure", @@ -5987,6 +6052,10 @@ "shape":"ReplicaDescriptionList", "documentation":"

    Represents replicas of the table.

    " }, + "GlobalTableWitnesses":{ + "shape":"GlobalTableWitnessDescriptionList", + "documentation":"

    The witness Region and its current status in the MRSC global table. Only one witness Region can be configured per MRSC global table.

    " + }, "RestoreSummary":{ "shape":"RestoreSummary", "documentation":"

    Contains details for the restore.

    " @@ -6017,7 +6086,7 @@ }, "MultiRegionConsistency":{ "shape":"MultiRegionConsistency", - "documentation":"

    Indicates one of the following consistency modes for a global table:

    • EVENTUAL: Indicates that the global table is configured for multi-Region eventual consistency.

    • STRONG: Indicates that the global table is configured for multi-Region strong consistency (preview).

      Multi-Region strong consistency (MRSC) is a new DynamoDB global tables capability currently available in preview mode. For more information, see Global tables multi-Region strong consistency.

    If you don't specify this field, the global table consistency mode defaults to EVENTUAL.

    " + "documentation":"

    Indicates one of the following consistency modes for a global table:

    • EVENTUAL: Indicates that the global table is configured for multi-Region eventual consistency (MREC).

    • STRONG: Indicates that the global table is configured for multi-Region strong consistency (MRSC).

    If you don't specify this field, the global table consistency mode defaults to EVENTUAL. For more information about global tables consistency modes, see Consistency modes in DynamoDB developer guide.

    " } }, "documentation":"

    Represents the properties of a table.

    " @@ -6061,7 +6130,8 @@ "ACTIVE", "INACCESSIBLE_ENCRYPTION_CREDENTIALS", "ARCHIVING", - "ARCHIVED" + "ARCHIVED", + "REPLICATION_NOT_AUTHORIZED" ] }, "TableWarmThroughputDescription":{ @@ -6733,7 +6803,7 @@ }, "ReplicaUpdates":{ "shape":"ReplicationGroupUpdateList", - "documentation":"

    A list of replica update actions (create, delete, or update) for the table.

    For global tables, this property only applies to global tables using Version 2019.11.21 (Current version).

    " + "documentation":"

    A list of replica update actions (create, delete, or update) for the table.

    " }, "TableClass":{ "shape":"TableClass", @@ -6745,7 +6815,11 @@ }, "MultiRegionConsistency":{ "shape":"MultiRegionConsistency", - "documentation":"

    Specifies the consistency mode for a new global table. This parameter is only valid when you create a global table by specifying one or more Create actions in the ReplicaUpdates action list.

    You can specify one of the following consistency modes:

    • EVENTUAL: Configures a new global table for multi-Region eventual consistency. This is the default consistency mode for global tables.

    • STRONG: Configures a new global table for multi-Region strong consistency (preview).

      Multi-Region strong consistency (MRSC) is a new DynamoDB global tables capability currently available in preview mode. For more information, see Global tables multi-Region strong consistency.

    If you don't specify this parameter, the global table consistency mode defaults to EVENTUAL.

    " + "documentation":"

    Specifies the consistency mode for a new global table. This parameter is only valid when you create a global table by specifying one or more Create actions in the ReplicaUpdates action list.

    You can specify one of the following consistency modes:

    • EVENTUAL: Configures a new global table for multi-Region eventual consistency (MREC). This is the default consistency mode for global tables.

    • STRONG: Configures a new global table for multi-Region strong consistency (MRSC).

    If you don't specify this field, the global table consistency mode defaults to EVENTUAL. For more information about global tables consistency modes, see Consistency modes in DynamoDB developer guide.

    " + }, + "GlobalTableWitnessUpdates":{ + "shape":"GlobalTableWitnessGroupUpdateList", + "documentation":"

    A list of witness updates for a MRSC global table. A witness provides a cost-effective alternative to a full replica in a MRSC global table by maintaining replicated change data written to global table replicas. You cannot perform read or write operations on a witness. For each witness, you can request one action:

    • Create - add a new witness to the global table.

    • Delete - remove a witness from the global table.

    You can create or delete only one witness per UpdateTable operation.

    For more information, see Multi-Region strong consistency (MRSC) in the Amazon DynamoDB Developer Guide.

    " }, "OnDemandThroughput":{ "shape":"OnDemandThroughput", @@ -6839,6 +6913,14 @@ }, "documentation":"

    Provides visibility into the number of read and write operations your table or secondary index can instantaneously support. The settings can be modified using the UpdateTable operation to meet the throughput requirements of an upcoming peak event.

    " }, + "WitnessStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "ACTIVE" + ] + }, "WriteRequest":{ "type":"structure", "members":{ diff --git a/services/ebs/pom.xml b/services/ebs/pom.xml index b17240a492b4..3610e0714aaf 100644 --- a/services/ebs/pom.xml +++ b/services/ebs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ebs AWS Java SDK :: Services :: EBS diff --git a/services/ebs/src/main/resources/codegen-resources/customization.config b/services/ebs/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ebs/src/main/resources/codegen-resources/customization.config +++ b/services/ebs/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ec2/pom.xml b/services/ec2/pom.xml index 1c3b2fe70e74..e98ef2fcb917 100644 --- a/services/ec2/pom.xml +++ b/services/ec2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ec2 AWS Java SDK :: Services :: Amazon EC2 diff --git a/services/ec2/src/main/resources/codegen-resources/paginators-1.json b/services/ec2/src/main/resources/codegen-resources/paginators-1.json index a71edd693147..a7229ad1b31b 100644 --- a/services/ec2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ec2/src/main/resources/codegen-resources/paginators-1.json @@ -372,6 +372,12 @@ "output_token": "NextToken", "result_key": "MacHosts" }, + "DescribeMacModificationTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MacModificationTasks" + }, "DescribeManagedPrefixLists": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/services/ec2/src/main/resources/codegen-resources/service-2.json 
b/services/ec2/src/main/resources/codegen-resources/service-2.json index 505f77a86f32..e6a414fca125 100644 --- a/services/ec2/src/main/resources/codegen-resources/service-2.json +++ b/services/ec2/src/main/resources/codegen-resources/service-2.json @@ -410,7 +410,7 @@ }, "input":{"shape":"AttachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

    Attaches an EBS volume to a running or stopped instance and exposes it to the instance with the specified device name.

    Encrypted EBS volumes must be attached to instances that support Amazon EBS encryption. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.

    After you attach an EBS volume, you must make it available. For more information, see Make an EBS volume available for use.

    If a volume has an Amazon Web Services Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • Amazon Web Services Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For more information, see Attach an Amazon EBS volume to an instance in the Amazon EBS User Guide.

    " + "documentation":"

    Attaches an Amazon EBS volume to a running or stopped instance, and exposes it to the instance with the specified device name.

    The maximum number of Amazon EBS volumes that you can attach to an instance depends on the instance type. If you exceed the volume attachment limit for an instance type, the attachment request fails with the AttachmentLimitExceeded error. For more information, see Instance volume limits.

    After you attach an EBS volume, you must make it available for use. For more information, see Make an EBS volume available for use.

    If a volume has an Amazon Web Services Marketplace product code:

    • The volume can be attached only to a stopped instance.

    • Amazon Web Services Marketplace product codes are copied from the volume to the instance.

    • You must be subscribed to the product.

    • The instance type and operating system of the instance must support the product. For example, you can't detach a volume from a Windows instance and attach it to a Linux instance.

    For more information, see Attach an Amazon EBS volume to an instance in the Amazon EBS User Guide.

    " }, "AttachVpnGateway":{ "name":"AttachVpnGateway", @@ -528,7 +528,7 @@ }, "input":{"shape":"CancelImageLaunchPermissionRequest"}, "output":{"shape":"CancelImageLaunchPermissionResult"}, - "documentation":"

    Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more information, see Cancel having an AMI shared with your Amazon Web Services account in the Amazon EC2 User Guide.

    " + "documentation":"

    Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more information, see Cancel having an AMI shared with your Amazon Web Services account in the Amazon EC2 User Guide.

    " }, "CancelImportTask":{ "name":"CancelImportTask", @@ -598,7 +598,7 @@ }, "input":{"shape":"CopyImageRequest"}, "output":{"shape":"CopyImageResult"}, - "documentation":"

    Initiates an AMI copy operation. You can copy an AMI from one Region to another, or from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

    When you copy an AMI from one Region to another, the destination Region is the current Region.

    When you copy an AMI from a Region to an Outpost, specify the ARN of the Outpost as the destination. Backing snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region or the key that you specify. Outposts do not support unencrypted snapshots.

    For information about the prerequisites when copying an AMI, see Copy an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Initiates an AMI copy operation. You can copy an AMI from one Region to another, or from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

    When you copy an AMI from one Region to another, the destination Region is the current Region.

    When you copy an AMI from a Region to an Outpost, specify the ARN of the Outpost as the destination. Backing snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region or the key that you specify. Outposts do not support unencrypted snapshots.

    For information about the prerequisites when copying an AMI, see Copy an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "CopySnapshot":{ "name":"CopySnapshot", @@ -720,6 +720,16 @@ "output":{"shape":"CreateDefaultVpcResult"}, "documentation":"

    Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPCs in the Amazon VPC User Guide. You cannot specify the components of the default VPC yourself.

    If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

    " }, + "CreateDelegateMacVolumeOwnershipTask":{ + "name":"CreateDelegateMacVolumeOwnershipTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDelegateMacVolumeOwnershipTaskRequest"}, + "output":{"shape":"CreateDelegateMacVolumeOwnershipTaskResult"}, + "documentation":"

    Delegates ownership of the Amazon EBS root volume for an Apple silicon Mac instance to an administrative user.

    " + }, "CreateDhcpOptions":{ "name":"CreateDhcpOptions", "http":{ @@ -778,7 +788,7 @@ }, "input":{"shape":"CreateImageRequest"}, "output":{"shape":"CreateImageResult"}, - "documentation":"

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    For more information, see Create an Amazon EBS-backed Linux AMI in the Amazon Elastic Compute Cloud User Guide.

    " + "documentation":"

    Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is either running or stopped.

    If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, the instance automatically launches with those additional volumes.

    The location of the source instance determines where you can create the snapshots of the AMI:

    • If the source instance is in a Region, you must create the snapshots in the same Region as the instance.

    • If the source instance is in a Local Zone, you can create the snapshots in the same Local Zone or in its parent Region.

    For more information, see Create an Amazon EBS-backed AMI in the Amazon Elastic Compute Cloud User Guide.

    " }, "CreateInstanceConnectEndpoint":{ "name":"CreateInstanceConnectEndpoint", @@ -788,7 +798,7 @@ }, "input":{"shape":"CreateInstanceConnectEndpointRequest"}, "output":{"shape":"CreateInstanceConnectEndpointResult"}, - "documentation":"

    Creates an EC2 Instance Connect Endpoint.

    An EC2 Instance Connect Endpoint allows you to connect to an instance, without requiring the instance to have a public IPv4 address. For more information, see Connect to your instances without requiring a public IPv4 address using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide.

    " + "documentation":"

    Creates an EC2 Instance Connect Endpoint.

    An EC2 Instance Connect Endpoint allows you to connect to an instance, without requiring the instance to have a public IPv4 address. For more information, see Connect to your instances using EC2 Instance Connect Endpoint in the Amazon EC2 User Guide.

    " }, "CreateInstanceEventWindow":{ "name":"CreateInstanceEventWindow", @@ -960,6 +970,16 @@ "output":{"shape":"CreateLocalGatewayVirtualInterfaceGroupResult"}, "documentation":"

    Create a local gateway virtual interface group.

    " }, + "CreateMacSystemIntegrityProtectionModificationTask":{ + "name":"CreateMacSystemIntegrityProtectionModificationTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMacSystemIntegrityProtectionModificationTaskRequest"}, + "output":{"shape":"CreateMacSystemIntegrityProtectionModificationTaskResult"}, + "documentation":"

    Creates a System Integrity Protection (SIP) modification task to configure the SIP settings for an x86 Mac instance or Apple silicon Mac instance. For more information, see Configure SIP for Amazon EC2 instances in the Amazon EC2 User Guide.

    When you configure the SIP settings for your instance, you can either enable or disable all SIP settings, or you can specify a custom SIP configuration that selectively enables or disables specific SIP settings.

    If you implement a custom configuration, connect to the instance and verify the settings to ensure that your requirements are properly implemented and functioning as intended.

    SIP configurations might change with macOS updates. We recommend that you review custom SIP settings after any macOS version upgrade to ensure continued compatibility and proper functionality of your security configurations.

    To enable or disable all SIP settings, use the MacSystemIntegrityProtectionStatus parameter only. For example, to enable all SIP settings, specify the following:

    • MacSystemIntegrityProtectionStatus=enabled

    To specify a custom configuration that selectively enables or disables specific SIP settings, use the MacSystemIntegrityProtectionStatus parameter to enable or disable all SIP settings, and then use the MacSystemIntegrityProtectionConfiguration parameter to specify exceptions. In this case, the exceptions you specify for MacSystemIntegrityProtectionConfiguration override the value you specify for MacSystemIntegrityProtectionStatus. For example, to enable all SIP settings, except NvramProtections, specify the following:

    • MacSystemIntegrityProtectionStatus=enabled

    • MacSystemIntegrityProtectionConfigurationRequest \"NvramProtections=disabled\"

    " + }, "CreateManagedPrefixList":{ "name":"CreateManagedPrefixList", "http":{ @@ -1087,7 +1107,7 @@ }, "input":{"shape":"CreateRestoreImageTaskRequest"}, "output":{"shape":"CreateRestoreImageTaskResult"}, - "documentation":"

    Starts a task that restores an AMI from an Amazon S3 object that was previously created by using CreateStoreImageTask.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Starts a task that restores an AMI from an Amazon S3 object that was previously created by using CreateStoreImageTask.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "CreateRoute":{ "name":"CreateRoute", @@ -1187,7 +1207,7 @@ }, "input":{"shape":"CreateStoreImageTaskRequest"}, "output":{"shape":"CreateStoreImageTaskResult"}, - "documentation":"

    Stores an AMI as a single object in an Amazon S3 bucket.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Stores an AMI as a single object in an Amazon S3 bucket.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "CreateSubnet":{ "name":"CreateSubnet", @@ -2347,7 +2367,7 @@ }, "input":{"shape":"DeregisterImageRequest"}, "output":{"shape":"DeregisterImageResult"}, - "documentation":"

    Deregisters the specified AMI. A deregistered AMI can't be used to launch new instances.

    If a deregistered EBS-backed AMI matches a Recycle Bin retention rule, it moves to the Recycle Bin for the specified retention period. It can be restored before its retention period expires, after which it is permanently deleted. If the deregistered AMI doesn't match a retention rule, it is permanently deleted immediately. For more information, see Recycle Bin in the Amazon EBS User Guide.

    Deregistering an AMI does not delete the following:

    • Instances already launched from the AMI. You'll continue to incur usage costs for the instances until you terminate them.

    • For EBS-backed AMIs: The snapshots that were created of the root and data volumes of the instance during AMI creation. You'll continue to incur snapshot storage costs.

    • For instance store-backed AMIs: The files uploaded to Amazon S3 during AMI creation. You'll continue to incur S3 storage costs.

    For more information, see Deregister an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Deregisters the specified AMI. A deregistered AMI can't be used to launch new instances.

    If a deregistered EBS-backed AMI matches a Recycle Bin retention rule, it moves to the Recycle Bin for the specified retention period. It can be restored before its retention period expires, after which it is permanently deleted. If the deregistered AMI doesn't match a retention rule, it is permanently deleted immediately. For more information, see Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin in the Amazon EBS User Guide.

    When deregistering an EBS-backed AMI, you can optionally delete its associated snapshots at the same time. However, if a snapshot is associated with multiple AMIs, it won't be deleted even if specified for deletion, although the AMI will still be deregistered.

    Deregistering an AMI does not delete the following:

    • Instances already launched from the AMI. You'll continue to incur usage costs for the instances until you terminate them.

    • For EBS-backed AMIs: Snapshots that are associated with multiple AMIs. You'll continue to incur snapshot storage costs.

    • For instance store-backed AMIs: The files uploaded to Amazon S3 during AMI creation. You'll continue to incur S3 storage costs.

    For more information, see Deregister an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "DeregisterInstanceEventNotificationAttributes":{ "name":"DeregisterInstanceEventNotificationAttributes", @@ -2847,7 +2867,7 @@ }, "input":{"shape":"DescribeImagesRequest"}, "output":{"shape":"DescribeImagesResult"}, - "documentation":"

    Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.

    The images available to you include public images, private images that you own, and private images owned by other Amazon Web Services accounts for which you have explicit launch permissions.

    Recently deregistered images appear in the returned results for a short interval and then return empty results. After all instances that reference a deregistered AMI are terminated, specifying the ID of the image will eventually return an error indicating that the AMI ID cannot be found.

    When Allowed AMIs is set to enabled, only allowed images are returned in the results, with the imageAllowed field set to true for each image. In audit-mode, the imageAllowed field is set to true for images that meet the account's Allowed AMIs criteria, and false for images that don't meet the criteria. For more information, see EnableAllowedImagesSettings.

    We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " + "documentation":"

    Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.

    The images available to you include public images, private images that you own, and private images owned by other Amazon Web Services accounts for which you have explicit launch permissions.

    Recently deregistered images appear in the returned results for a short interval and then return empty results. After all instances that reference a deregistered AMI are terminated, specifying the ID of the image will eventually return an error indicating that the AMI ID cannot be found.

    When Allowed AMIs is set to enabled, only allowed images are returned in the results, with the imageAllowed field set to true for each image. In audit-mode, the imageAllowed field is set to true for images that meet the account's Allowed AMIs criteria, and false for images that don't meet the criteria. For more information, see EnableAllowedImagesSettings.

    The Amazon EC2 API follows an eventual consistency model. This means that the result of an API command you run that creates or modifies resources might not be immediately available to all subsequent commands you run. For guidance on how to manage eventual consistency, see Eventual consistency in the Amazon EC2 API in the Amazon EC2 Developer Guide.

    We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " }, "DescribeImportImageTasks":{ "name":"DescribeImportImageTasks", @@ -2937,7 +2957,7 @@ }, "input":{"shape":"DescribeInstanceStatusRequest"}, "output":{"shape":"DescribeInstanceStatusResult"}, - "documentation":"

    Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status checks for your instances and Troubleshoot instances with failed status checks in the Amazon EC2 User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled events for your instances in the Amazon EC2 User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance lifecycle in the Amazon EC2 User Guide.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " + "documentation":"

    Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

    Instance status includes the following components:

    • Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status checks for your instances and Troubleshoot instances with failed status checks in the Amazon EC2 User Guide.

    • Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled events for your instances in the Amazon EC2 User Guide.

    • Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance lifecycle in the Amazon EC2 User Guide.

    The Amazon EC2 API follows an eventual consistency model. This means that the result of an API command you run that creates or modifies resources might not be immediately available to all subsequent commands you run. For guidance on how to manage eventual consistency, see Eventual consistency in the Amazon EC2 API in the Amazon EC2 Developer Guide.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " }, "DescribeInstanceTopology":{ "name":"DescribeInstanceTopology", @@ -2947,7 +2967,7 @@ }, "input":{"shape":"DescribeInstanceTopologyRequest"}, "output":{"shape":"DescribeInstanceTopologyResult"}, - "documentation":"

    Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.

    Limitations

    • Supported zones

      • Availability Zone

      • Local Zone

    • Supported instance types

      • hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge

      • p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge | p5en.48xlarge

      • trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge | trn2.48xlarge | trn2u.48xlarge

    For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.

    " + "documentation":"

    Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.

    Limitations

    • Supported zones

      • Availability Zone

      • Local Zone

    • Supported instance types

      • Returns 3 network nodes in the response

        • hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge

        • p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge | p5en.48xlarge

        • trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge | trn2.48xlarge | trn2u.48xlarge

      • Returns 4 network nodes in the response

        • p6-b200.48xlarge

    For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.

    " }, "DescribeInstanceTypeOfferings":{ "name":"DescribeInstanceTypeOfferings", @@ -2977,7 +2997,7 @@ }, "input":{"shape":"DescribeInstancesRequest"}, "output":{"shape":"DescribeInstancesResult"}, - "documentation":"

    Describes the specified instances or all instances.

    If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance. We recommend that you use pagination to ensure that the operation returns quickly and successfully.

    If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the output.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

    If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

    We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " + "documentation":"

    Describes the specified instances or all instances.

    If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance. We recommend that you use pagination to ensure that the operation returns quickly and successfully.

    If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the output.

    Recently terminated instances might appear in the returned results. This interval is usually less than one hour.

    If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally.

    The Amazon EC2 API follows an eventual consistency model. This means that the result of an API command you run that creates or modifies resources might not be immediately available to all subsequent commands you run. For guidance on how to manage eventual consistency, see Eventual consistency in the Amazon EC2 API in the Amazon EC2 Developer Guide.

    We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts.

    The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

    " }, "DescribeInternetGateways":{ "name":"DescribeInternetGateways", @@ -3179,6 +3199,16 @@ "output":{"shape":"DescribeMacHostsResult"}, "documentation":"

    Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts.

    " }, + "DescribeMacModificationTasks":{ + "name":"DescribeMacModificationTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMacModificationTasksRequest"}, + "output":{"shape":"DescribeMacModificationTasksResult"}, + "documentation":"

    Describes a System Integrity Protection (SIP) modification task or volume ownership delegation task for an Amazon EC2 Mac instance. For more information, see Configure SIP for Amazon EC2 instances in the Amazon EC2 User Guide.

    " + }, "DescribeManagedPrefixLists":{ "name":"DescribeManagedPrefixLists", "http":{ @@ -3297,7 +3327,7 @@ }, "input":{"shape":"DescribeOutpostLagsRequest"}, "output":{"shape":"DescribeOutpostLagsResult"}, - "documentation":"

    Describes the Outposts link aggregation groups (LAGs).

    " + "documentation":"

    Describes the Outposts link aggregation groups (LAGs).

    LAGs are only available for second-generation Outposts racks at this time.

    " }, "DescribePlacementGroups":{ "name":"DescribePlacementGroups", @@ -3617,7 +3647,7 @@ }, "input":{"shape":"DescribeStoreImageTasksRequest"}, "output":{"shape":"DescribeStoreImageTasksResult"}, - "documentation":"

    Describes the progress of the AMI store tasks. You can describe the store tasks for specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from the last 31 days.

    For each AMI task, the response indicates if the task is InProgress, Completed, or Failed. For tasks InProgress, the response shows the estimated progress as a percentage.

    Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 days can be viewed.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using Amazon S3 in the Amazon EC2 User Guide.

    " + "documentation":"

    Describes the progress of the AMI store tasks. You can describe the store tasks for specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from the last 31 days.

    For each AMI task, the response indicates if the task is InProgress, Completed, or Failed. For tasks InProgress, the response shows the estimated progress as a percentage.

    Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 days can be viewed.

    To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the Amazon EC2 User Guide.

    For more information, see Store and restore an AMI using S3 in the Amazon EC2 User Guide.

    " }, "DescribeSubnets":{ "name":"DescribeSubnets", @@ -4174,7 +4204,7 @@ }, "input":{"shape":"DisableImageBlockPublicAccessRequest"}, "output":{"shape":"DisableImageBlockPublicAccessResult"}, - "documentation":"

    Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Disables block public access for AMIs at the account level in the specified Amazon Web Services Region. This removes the block public access restriction from your account. With the restriction removed, you can publicly share your AMIs in the specified Amazon Web Services Region.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be block-new-sharing. When the API has completed the configuration, the response will be unblocked.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " }, "DisableImageDeprecation":{ "name":"DisableImageDeprecation", @@ -4184,7 +4214,7 @@ }, "input":{"shape":"DisableImageDeprecationRequest"}, "output":{"shape":"DisableImageDeprecationResult"}, - "documentation":"

    Cancels the deprecation of the specified AMI.

    For more information, see Deprecate an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Cancels the deprecation of the specified AMI.

    For more information, see Deprecate an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "DisableImageDeregistrationProtection":{ "name":"DisableImageDeregistrationProtection", @@ -4194,7 +4224,7 @@ }, "input":{"shape":"DisableImageDeregistrationProtectionRequest"}, "output":{"shape":"DisableImageDeregistrationProtectionResult"}, - "documentation":"

    Disables deregistration protection for an AMI. When deregistration protection is disabled, the AMI can be deregistered.

    If you chose to include a 24-hour cooldown period when you enabled deregistration protection for the AMI, then, when you disable deregistration protection, you won’t immediately be able to deregister the AMI.

    For more information, see Protect an AMI from deregistration in the Amazon EC2 User Guide.

    " + "documentation":"

    Disables deregistration protection for an AMI. When deregistration protection is disabled, the AMI can be deregistered.

    If you chose to include a 24-hour cooldown period when you enabled deregistration protection for the AMI, then, when you disable deregistration protection, you won’t immediately be able to deregister the AMI.

    For more information, see Protect an Amazon EC2 AMI from deregistration in the Amazon EC2 User Guide.

    " }, "DisableIpamOrganizationAdminAccount":{ "name":"DisableIpamOrganizationAdminAccount", @@ -4282,7 +4312,7 @@ "requestUri":"/" }, "input":{"shape":"DisassociateAddressRequest"}, - "documentation":"

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    " + "documentation":"

    Disassociates an Elastic IP address from the instance or network interface it's associated with.

    This is an idempotent operation. If you perform the operation more than once, Amazon EC2 doesn't return an error.

    An address cannot be disassociated if all of the following conditions are met:

    • Network interface has a publicDualStackDnsName or publicDnsName

    • Public IPv4 address is the primary public IPv4 address

    • Network interface only has one remaining public IPv4 address

    " }, "DisassociateCapacityReservationBillingOwner":{ "name":"DisassociateCapacityReservationBillingOwner", @@ -4521,7 +4551,7 @@ }, "input":{"shape":"EnableImageRequest"}, "output":{"shape":"EnableImageResult"}, - "documentation":"

    Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again.

    Only the AMI owner can re-enable a disabled AMI.

    For more information, see Disable an AMI in the Amazon EC2 User Guide.

    " + "documentation":"

    Re-enables a disabled AMI. The re-enabled AMI is marked as available and can be used for instance launches, appears in describe operations, and can be shared. Amazon Web Services accounts, organizations, and Organizational Units that lost access to the AMI when it was disabled do not regain access automatically. Once the AMI is available, it can be shared with them again.

    Only the AMI owner can re-enable a disabled AMI.

    For more information, see Disable an Amazon EC2 AMI in the Amazon EC2 User Guide.

    " }, "EnableImageBlockPublicAccess":{ "name":"EnableImageBlockPublicAccess", @@ -4531,7 +4561,7 @@ }, "input":{"shape":"EnableImageBlockPublicAccessRequest"}, "output":{"shape":"EnableImageBlockPublicAccessResult"}, - "documentation":"

    Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Enables block public access for AMIs at the account level in the specified Amazon Web Services Region. This prevents the public sharing of your AMIs. However, if you already have public AMIs, they will remain publicly available.

    The API can take up to 10 minutes to configure this setting. During this time, if you run GetImageBlockPublicAccessState, the response will be unblocked. When the API has completed the configuration, the response will be block-new-sharing.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " }, "EnableImageDeprecation":{ "name":"EnableImageDeprecation", @@ -4551,7 +4581,7 @@ }, "input":{"shape":"EnableImageDeregistrationProtectionRequest"}, "output":{"shape":"EnableImageDeregistrationProtectionResult"}, - "documentation":"

    Enables deregistration protection for an AMI. When deregistration protection is enabled, the AMI can't be deregistered.

    To allow the AMI to be deregistered, you must first disable deregistration protection using DisableImageDeregistrationProtection.

    For more information, see Protect an AMI from deregistration in the Amazon EC2 User Guide.

    " + "documentation":"

    Enables deregistration protection for an AMI. When deregistration protection is enabled, the AMI can't be deregistered.

    To allow the AMI to be deregistered, you must first disable deregistration protection using DisableImageDeregistrationProtection.

    For more information, see Protect an Amazon EC2 AMI from deregistration in the Amazon EC2 User Guide.

    " }, "EnableIpamOrganizationAdminAccount":{ "name":"EnableIpamOrganizationAdminAccount", @@ -4701,6 +4731,16 @@ "output":{"shape":"ExportVerifiedAccessInstanceClientConfigurationResult"}, "documentation":"

    Exports the client configuration for a Verified Access instance.

    " }, + "GetActiveVpnTunnelStatus":{ + "name":"GetActiveVpnTunnelStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetActiveVpnTunnelStatusRequest"}, + "output":{"shape":"GetActiveVpnTunnelStatusResult"}, + "documentation":"

    Returns the currently negotiated security parameters for an active VPN tunnel, including IKE version, DH groups, encryption algorithms, and integrity algorithms.

    " + }, "GetAllowedImagesSettings":{ "name":"GetAllowedImagesSettings", "http":{ @@ -4859,7 +4899,7 @@ }, "input":{"shape":"GetImageBlockPublicAccessStateRequest"}, "output":{"shape":"GetImageBlockPublicAccessStateResult"}, - "documentation":"

    Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " + "documentation":"

    Gets the current state of block public access for AMIs at the account level in the specified Amazon Web Services Region.

    For more information, see Block public access to your AMIs in the Amazon EC2 User Guide.

    " }, "GetInstanceMetadataDefaults":{ "name":"GetInstanceMetadataDefaults", @@ -5535,7 +5575,7 @@ }, "input":{"shape":"ModifyInstanceMaintenanceOptionsRequest"}, "output":{"shape":"ModifyInstanceMaintenanceOptionsResult"}, - "documentation":"

    Modifies the recovery behavior of your instance to disable simplified automatic recovery or set the recovery behavior to default. The default configuration will not enable simplified automatic recovery for an unsupported instance type. For more information, see Simplified automatic recovery.

    " + "documentation":"

    Modifies the recovery behavior of your instance to disable simplified automatic recovery or set the recovery behavior to default. The default configuration will not enable simplified automatic recovery for an unsupported instance type. For more information, see Simplified automatic recovery.

    Modifies the reboot migration behavior during a user-initiated reboot of an instance that has a pending system-reboot event. For more information, see Enable or disable reboot migration.

    " }, "ModifyInstanceMetadataDefaults":{ "name":"ModifyInstanceMetadataDefaults", @@ -5676,6 +5716,16 @@ "output":{"shape":"ModifyPrivateDnsNameOptionsResult"}, "documentation":"

    Modifies the options for instance hostnames for the specified instance.

    " }, + "ModifyPublicIpDnsNameOptions":{ + "name":"ModifyPublicIpDnsNameOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyPublicIpDnsNameOptionsRequest"}, + "output":{"shape":"ModifyPublicIpDnsNameOptionsResult"}, + "documentation":"

    Modify public hostname options for a network interface. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    " + }, "ModifyReservedInstances":{ "name":"ModifyReservedInstances", "http":{ @@ -6179,7 +6229,7 @@ }, "input":{"shape":"RegisterImageRequest"}, "output":{"shape":"RegisterImageResult"}, - "documentation":"

    Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is the final step in the creation process. For more information about creating AMIs, see Create an AMI from a snapshot and Create an instance-store backed AMI in the Amazon EC2 User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself. We recommend that you always use CreateImage unless you have a specific reason to use RegisterImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    Register a snapshot of a root device volume

    You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using a block device mapping. You can't set the encryption state of the volume using the block device mapping. If the snapshot is encrypted, or encryption by default is enabled, the root volume of an instance launched from the AMI is encrypted.

    For more information, see Create an AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs in the Amazon EC2 User Guide.

    Amazon Web Services Marketplace product codes

    If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new AMI.

    In most cases, AMIs for Windows, RedHat, SUSE, and SQL Server require correct licensing information to be present on the AMI. For more information, see Understand AMI billing information in the Amazon EC2 User Guide. When creating an AMI from a snapshot, the RegisterImage operation derives the correct billing information from the snapshot's metadata, but this requires the appropriate metadata to be present. To verify if the correct billing information was applied, check the PlatformDetails field on the new AMI. If the field is empty or doesn't match the expected operating system code (for example, Windows, RedHat, SUSE, or SQL), the AMI creation was unsuccessful, and you should discard the AMI and instead create the AMI from an instance using CreateImage. For more information, see Create an AMI from an instance in the Amazon EC2 User Guide.

    If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Understand AMI billing information in the Amazon EC2 User Guide.

    " + "documentation":"

    Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is the final step in the creation process. For more information about creating AMIs, see Create an AMI from a snapshot and Create an instance-store backed AMI in the Amazon EC2 User Guide.

    For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single request, so you don't have to register the AMI yourself. We recommend that you always use CreateImage unless you have a specific reason to use RegisterImage.

    If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. If you make changes to an image, deregister the previous image and register the new image.

    Register a snapshot of a root device volume

    You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify the snapshot using a block device mapping. You can't set the encryption state of the volume using the block device mapping. If the snapshot is encrypted, or encryption by default is enabled, the root volume of an instance launched from the AMI is encrypted.

    For more information, see Create an AMI from a snapshot and Use encryption with EBS-backed AMIs in the Amazon EC2 User Guide.

    Amazon Web Services Marketplace product codes

    If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new AMI.

    In most cases, AMIs for Windows, RedHat, SUSE, and SQL Server require correct licensing information to be present on the AMI. For more information, see Understand AMI billing information in the Amazon EC2 User Guide. When creating an AMI from a snapshot, the RegisterImage operation derives the correct billing information from the snapshot's metadata, but this requires the appropriate metadata to be present. To verify if the correct billing information was applied, check the PlatformDetails field on the new AMI. If the field is empty or doesn't match the expected operating system code (for example, Windows, RedHat, SUSE, or SQL), the AMI creation was unsuccessful, and you should discard the AMI and instead create the AMI from an instance using CreateImage. For more information, see Create an AMI from an instance in the Amazon EC2 User Guide.

    If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an AMI with a billing product code, make sure that the Reserved Instance has the matching billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see Understand AMI billing information in the Amazon EC2 User Guide.

    " }, "RegisterInstanceEventNotificationAttributes":{ "name":"RegisterInstanceEventNotificationAttributes", @@ -6491,7 +6541,7 @@ }, "input":{"shape":"RestoreImageFromRecycleBinRequest"}, "output":{"shape":"RestoreImageFromRecycleBinResult"}, - "documentation":"

    Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon EC2 User Guide.

    " + "documentation":"

    Restores an AMI from the Recycle Bin. For more information, see Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin in the Amazon EC2 User Guide.

    " }, "RestoreManagedPrefixListVersion":{ "name":"RestoreManagedPrefixListVersion", @@ -6620,7 +6670,7 @@ }, "input":{"shape":"StartDeclarativePoliciesReportRequest"}, "output":{"shape":"StartDeclarativePoliciesReportResult"}, - "documentation":"

    Generates an account status report. The report is generated asynchronously, and can take several hours to complete.

    The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization).

    The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values):

    s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv

    Prerequisites for generating a report

    • The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization.

    • An S3 bucket must be available before generating the report (you can create a new one or use an existing one), it must be in the same Region where the report generation request is made, and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under .

    • Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide.

    • Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error.

    For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.

    " + "documentation":"

    Generates an account status report. The report is generated asynchronously, and can take several hours to complete.

    The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization).

    The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values):

    s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv

    Prerequisites for generating a report

    • The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization.

    • An S3 bucket must be available before generating the report (you can create a new one or use an existing one), it must be in the same Region where the report generation request is made, and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under Examples.

    • Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide.

    • Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error.

    For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.

    " }, "StartInstances":{ "name":"StartInstances", @@ -6670,7 +6720,7 @@ }, "input":{"shape":"StopInstancesRequest"}, "output":{"shape":"StopInstancesResult"}, - "documentation":"

    Stops an Amazon EBS-backed instance. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide.

    When you stop an instance, we shut it down. You can restart your instance at any time.

    You can use the Stop operation together with the Hibernate parameter to hibernate an instance if the instance is enabled for hibernation and meets the hibernation prerequisites. Stopping an instance doesn't preserve data stored in RAM, while hibernation does. If hibernation fails, a normal shutdown occurs. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

    If your instance appears stuck in the stopping state, there might be an issue with the underlying host computer. You can use the Stop operation together with the Force parameter to force stop your instance. For more information, see Troubleshoot Amazon EC2 instance stop issues in the Amazon EC2 User Guide.

    Stopping and hibernating an instance differs from rebooting or terminating it. For example, a stopped or hibernated instance retains its root volume and any data volumes, unlike terminated instances where these volumes are automatically deleted. For more information about the differences between stopping, hibernating, rebooting, and terminating instances, see Amazon EC2 instance state changes in the Amazon EC2 User Guide.

    We don't charge for instance usage or data transfer fees when an instance is stopped. However, the root volume and any data volumes remain and continue to persist your data, and you're charged for volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, followed by per-second billing.

    You can't stop or hibernate instance store-backed instances.

    " + "documentation":"

    Stops an Amazon EBS-backed instance. You can restart your instance at any time using the StartInstances API. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide.

    When you stop an instance, we shut it down.

    You can use the Stop operation together with the Hibernate parameter to hibernate an instance if the instance is enabled for hibernation and meets the hibernation prerequisites. Stopping an instance doesn't preserve data stored in RAM, while hibernation does. If hibernation fails, a normal shutdown occurs. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide.

    If your instance appears stuck in the stopping state, there might be an issue with the underlying host computer. You can use the Stop operation together with the Force parameter to force stop your instance. For more information, see Troubleshoot Amazon EC2 instance stop issues in the Amazon EC2 User Guide.

    Stopping and hibernating an instance differs from rebooting or terminating it. For example, a stopped or hibernated instance retains its root volume and any data volumes, unlike terminated instances where these volumes are automatically deleted. For more information about the differences between stopping, hibernating, rebooting, and terminating instances, see Amazon EC2 instance state changes in the Amazon EC2 User Guide.

    We don't charge for instance usage or data transfer fees when an instance is stopped. However, the root volume and any data volumes remain and continue to persist your data, and you're charged for volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, followed by per-second billing.

    You can't stop or hibernate instance store-backed instances.

    " }, "TerminateClientVpnConnections":{ "name":"TerminateClientVpnConnections", @@ -6690,7 +6740,7 @@ }, "input":{"shape":"TerminateInstancesRequest"}, "output":{"shape":"TerminateInstancesResult"}, - "documentation":"

    Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

    If you terminate multiple instances across multiple Availability Zones, and one or more of the specified instances are enabled for termination protection, the request fails with the following results:

    • The specified instances that are in the same Availability Zone as the protected instance are not terminated.

    • The specified instances that are in different Availability Zones, where no other specified instances are protected, are successfully terminated.

    For example, say you have the following instances:

    • Instance A: us-east-1a; Not protected

    • Instance B: us-east-1a; Not protected

    • Instance C: us-east-1b; Protected

    • Instance D: us-east-1b; not protected

    If you attempt to terminate all of these instances in the same request, the request reports failure with the following results:

    • Instance A and Instance B are successfully terminated because none of the specified instances in us-east-1a are enabled for termination protection.

    • Instance C and Instance D fail to terminate because at least one of the specified instances in us-east-1b (Instance C) is enabled for termination protection.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Amazon EC2 instance state changes in the Amazon EC2 User Guide.

    For information about troubleshooting, see Troubleshooting terminating your instance in the Amazon EC2 User Guide.

    " + "documentation":"

    Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds.

    If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated.

    If you terminate multiple instances across multiple Availability Zones, and one or more of the specified instances are enabled for termination protection, the request fails with the following results:

    • The specified instances that are in the same Availability Zone as the protected instance are not terminated.

    • The specified instances that are in different Availability Zones, where no other specified instances are protected, are successfully terminated.

    For example, say you have the following instances:

    • Instance A: us-east-1a; Not protected

    • Instance B: us-east-1a; Not protected

    • Instance C: us-east-1b; Protected

    • Instance D: us-east-1b; Not protected

    If you attempt to terminate all of these instances in the same request, the request reports failure with the following results:

    • Instance A and Instance B are successfully terminated because none of the specified instances in us-east-1a are enabled for termination protection.

    • Instance C and Instance D fail to terminate because at least one of the specified instances in us-east-1b (Instance C) is enabled for termination protection.

    Terminated instances remain visible after termination (for approximately one hour).

    By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running.

    You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. For more information about the differences between stopping and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide.

    For more information about troubleshooting, see Troubleshooting terminating your instance in the Amazon EC2 User Guide.

    " }, "UnassignIpv6Addresses":{ "name":"UnassignIpv6Addresses", @@ -7292,6 +7342,57 @@ "locationName":"item" } }, + "ActiveVpnTunnelStatus":{ + "type":"structure", + "members":{ + "Phase1EncryptionAlgorithm":{ + "shape":"String", + "documentation":"

    The encryption algorithm negotiated in Phase 1 IKE negotiations.

    ", + "locationName":"phase1EncryptionAlgorithm" + }, + "Phase2EncryptionAlgorithm":{ + "shape":"String", + "documentation":"

    The encryption algorithm negotiated in Phase 2 IKE negotiations.

    ", + "locationName":"phase2EncryptionAlgorithm" + }, + "Phase1IntegrityAlgorithm":{ + "shape":"String", + "documentation":"

    The integrity algorithm negotiated in Phase 1 IKE negotiations.

    ", + "locationName":"phase1IntegrityAlgorithm" + }, + "Phase2IntegrityAlgorithm":{ + "shape":"String", + "documentation":"

    The integrity algorithm negotiated in Phase 2 IKE negotiations.

    ", + "locationName":"phase2IntegrityAlgorithm" + }, + "Phase1DHGroup":{ + "shape":"Integer", + "documentation":"

    The Diffie-Hellman group number being used in Phase 1 IKE negotiations.

    ", + "locationName":"phase1DHGroup" + }, + "Phase2DHGroup":{ + "shape":"Integer", + "documentation":"

    The Diffie-Hellman group number being used in Phase 2 IKE negotiations.

    ", + "locationName":"phase2DHGroup" + }, + "IkeVersion":{ + "shape":"String", + "documentation":"

    The version of the Internet Key Exchange (IKE) protocol being used.

    ", + "locationName":"ikeVersion" + }, + "ProvisioningStatus":{ + "shape":"VpnTunnelProvisioningStatus", + "documentation":"

    The current provisioning status of the VPN tunnel.

    ", + "locationName":"provisioningStatus" + }, + "ProvisioningStatusReason":{ + "shape":"String", + "documentation":"

    The reason for the current provisioning status.

    ", + "locationName":"provisioningStatusReason" + } + }, + "documentation":"

    Contains information about the current security configuration of an active VPN tunnel.

    " + }, "ActivityStatus":{ "type":"string", "enum":[ @@ -7503,6 +7604,11 @@ "documentation":"

    The carrier IP address associated. This option is only available for network interfaces which reside in a subnet in a Wavelength Zone (for example an EC2 instance).

    ", "locationName":"carrierIp" }, + "SubnetId":{ + "shape":"String", + "documentation":"

    The ID of the subnet where the IP address is allocated.

    ", + "locationName":"subnetId" + }, "ServiceManaged":{ "shape":"ServiceManaged", "documentation":"

    The service that manages the elastic IP address.

    The only option supported today is alb.

    ", @@ -7753,7 +7859,6 @@ }, "AllocateHostsRequest":{ "type":"structure", - "required":["AvailabilityZone"], "members":{ "InstanceFamily":{ "shape":"String", @@ -7781,6 +7886,10 @@ "documentation":"

    The IDs of the Outpost hardware assets on which to allocate the Dedicated Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency between your workloads. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter.

    • If you specify this parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset.

    • If you specify both AssetIds and Quantity, then the value for Quantity must be equal to the number of asset IDs specified.

    ", "locationName":"AssetId" }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

    The ID of the Availability Zone.

    " + }, "AutoPlacement":{ "shape":"AutoPlacement", "documentation":"

    Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding auto-placement and affinity in the Amazon EC2 User Guide.

    Default: off

    ", @@ -8111,6 +8220,11 @@ "documentation":"

    The Availability Zone.

    ", "locationName":"availabilityZone" }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" + }, "Instance":{ "shape":"AnalysisComponent", "documentation":"

    Information about the instance.

    ", @@ -9356,6 +9470,13 @@ "locationName":"item" } }, + "AssociatedSubnetList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"item" + } + }, "AssociatedTargetNetwork":{ "type":"structure", "members":{ @@ -11686,6 +11807,11 @@ "shape":"CapacityReservationTenancy", "documentation":"

    The tenancy of the Capacity Reservation.

    ", "locationName":"tenancy" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" } }, "documentation":"

    Information about a Capacity Reservation.

    " @@ -13153,7 +13279,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in the Amazon EC2 API Reference.

    ", + "documentation":"

    Unique, case-sensitive identifier you provide to ensure idempotency of the request. For more information, see Ensuring idempotency in Amazon EC2 API requests in the Amazon EC2 API Reference.

    ", "idempotencyToken":true }, "Description":{ @@ -13197,7 +13323,7 @@ }, "SnapshotCopyCompletionDurationMinutes":{ "shape":"Long", - "documentation":"

    Specify a completion duration, in 15 minute increments, to initiate a time-based AMI copy. The specified completion duration applies to each of the snapshots associated with the AMI. Each snapshot associated with the AMI will be completed within the specified completion duration, regardless of their size.

    If you do not specify a value, the AMI copy operation is completed on a best-effort basis.

    For more information, see Time-based copies.

    " + "documentation":"

    Specify a completion duration, in 15 minute increments, to initiate a time-based AMI copy. The specified completion duration applies to each of the snapshots associated with the AMI. Each snapshot associated with the AMI will be completed within the specified completion duration, with copy throughput automatically adjusted for each snapshot based on its size to meet the timing target.

    If you do not specify a value, the AMI copy operation is completed on a best-effort basis.

    For more information, see Time-based copies for Amazon EBS snapshots and EBS-backed AMIs.

    " }, "DryRun":{ "shape":"Boolean", @@ -13999,6 +14125,47 @@ } } }, + "CreateDelegateMacVolumeOwnershipTaskRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "MacCredentials" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "documentation":"

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The ID of the Amazon EC2 Mac instance.

    " + }, + "MacCredentials":{ + "shape":"SensitiveMacCredentials", + "documentation":"

    Specifies the following credentials:

    • Internal disk administrative user

      • Username - Only the default administrative user (aws-managed-user) is supported and it is used by default. You can't specify a different administrative user.

      • Password - If you did not change the default password for aws-managed-user, specify the default password, which is blank. Otherwise, specify your password.

    • Amazon EBS root volume administrative user

      • Username - If you did not change the default administrative user, specify ec2-user. Otherwise, specify the username for your administrative user.

      • Password - Specify the password for the administrative user.

    The credentials must be specified in the following JSON format:

    { \"internalDiskPassword\":\"internal-disk-admin_password\", \"rootVolumeUsername\":\"root-volume-admin_username\", \"rootVolumepassword\":\"root-volume-admin_password\" }

    " + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

    The tags to assign to the volume ownership delegation task.

    ", + "locationName":"TagSpecification" + } + } + }, + "CreateDelegateMacVolumeOwnershipTaskResult":{ + "type":"structure", + "members":{ + "MacModificationTask":{ + "shape":"MacModificationTask", + "documentation":"

    Information about the volume ownership delegation task.

    ", + "locationName":"macModificationTask" + } + } + }, "CreateDhcpOptionsRequest":{ "type":"structure", "required":["DhcpConfigurations"], @@ -14375,6 +14542,10 @@ "documentation":"

    The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the snapshots, or both.

    • To tag the AMI, the value for ResourceType must be image.

    • To tag the snapshots that are created of the root volume and of other Amazon EBS volumes that are attached to the instance, the value for ResourceType must be snapshot. The same tag is applied to all of the snapshots that are created.

    If you specify other values for ResourceType, the request fails.

    To tag an AMI or snapshot after it has been created, see CreateTags.

    ", "locationName":"TagSpecification" }, + "SnapshotLocation":{ + "shape":"SnapshotLocationEnum", + "documentation":"

    Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter.

    The Amazon S3 location where the snapshots will be stored.

    • To create local snapshots in the same Local Zone as the source instance, specify local.

    • To create regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter.

    Default: regional

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -15199,6 +15370,55 @@ } } }, + "CreateMacSystemIntegrityProtectionModificationTaskRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "MacSystemIntegrityProtectionStatus" + ], + "members":{ + "ClientToken":{ + "shape":"String", + "documentation":"

    Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

    ", + "idempotencyToken":true + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The ID of the Amazon EC2 Mac instance.

    " + }, + "MacCredentials":{ + "shape":"SensitiveMacCredentials", + "documentation":"

    [Apple silicon Mac instances only] Specifies the following credentials:

    • Internal disk administrative user

      • Username - Only the default administrative user (aws-managed-user) is supported and it is used by default. You can't specify a different administrative user.

      • Password - If you did not change the default password for aws-managed-user, specify the default password, which is blank. Otherwise, specify your password.

    • Amazon EBS root volume administrative user

      • Username - If you did not change the default administrative user, specify ec2-user. Otherwise, specify the username for your administrative user.

      • Password - Specify the password for the administrative user.

    The credentials must be specified in the following JSON format:

    { \"internalDiskPassword\":\"internal-disk-admin_password\", \"rootVolumeUsername\":\"root-volume-admin_username\", \"rootVolumepassword\":\"root-volume-admin_password\" }

    " + }, + "MacSystemIntegrityProtectionConfiguration":{ + "shape":"MacSystemIntegrityProtectionConfigurationRequest", + "documentation":"

    Specifies the overrides to selectively enable or disable individual SIP settings. The individual settings you specify here override the overall SIP status you specify for MacSystemIntegrityProtectionStatus.

    " + }, + "MacSystemIntegrityProtectionStatus":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Specifies the overall SIP status for the instance. To enable all SIP settings, specify enabled. To disable all SIP settings, specify disabled.

    " + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

    Specifies tags to apply to the SIP modification task.

    ", + "locationName":"TagSpecification" + } + } + }, + "CreateMacSystemIntegrityProtectionModificationTaskResult":{ + "type":"structure", + "members":{ + "MacModificationTask":{ + "shape":"MacModificationTask", + "documentation":"

    Information about the SIP modification task.

    ", + "locationName":"macModificationTask" + } + } + }, "CreateManagedPrefixListRequest":{ "type":"structure", "required":[ @@ -15912,6 +16132,10 @@ "shape":"CoreNetworkArn", "documentation":"

    The Amazon Resource Name (ARN) of the core network.

    " }, + "OdbNetworkArn":{ + "shape":"OdbNetworkArn", + "documentation":"

    The Amazon Resource Name (ARN) of the ODB network.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -16258,7 +16482,7 @@ }, "Location":{ "shape":"SnapshotLocationEnum", - "documentation":"

    Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter.

    • To create local snapshots in the same Local Zone as the source instance, specify local.

    • To create a regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter.

    Default value: regional

    " + "documentation":"

    Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter.

    • To create local snapshots in the same Local Zone as the source instance, specify local.

    • To create regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter.

    Default value: regional

    " } } }, @@ -18151,6 +18375,10 @@ "documentation":"

    The tags to apply to the VPN connection.

    ", "locationName":"TagSpecification" }, + "PreSharedKeyStorage":{ + "shape":"String", + "documentation":"

    Specifies the storage mode for the pre-shared key (PSK). Valid values are Standard (stored in the Site-to-Site VPN service) or SecretsManager (stored in Amazon Web Services Secrets Manager).

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -19907,6 +20135,29 @@ } } }, + "DeleteSnapshotResultSet":{ + "type":"list", + "member":{ + "shape":"DeleteSnapshotReturnCode", + "locationName":"item" + } + }, + "DeleteSnapshotReturnCode":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

    The ID of the snapshot.

    ", + "locationName":"snapshotId" + }, + "ReturnCode":{ + "shape":"SnapshotReturnCodes", + "documentation":"

    The result code from the snapshot deletion attempt. Possible values:

    • success - The snapshot was successfully deleted.

    • skipped - The snapshot was not deleted because it's associated with other AMIs.

    • missing-permissions - The snapshot was not deleted because the role lacks DeleteSnapshot permissions. For more information, see How Amazon EBS works with IAM.

    • internal-error - The snapshot was not deleted due to a server error.

    • client-error - The snapshot was not deleted due to a client configuration error.

    For details about an error, check the DeleteSnapshot event in the CloudTrail event history. For more information, see View event history in the Amazon Web Services CloudTrail User Guide.

    ", + "locationName":"returnCode" + } + }, + "documentation":"

    The snapshot ID and its deletion result code.

    " + }, "DeleteSpotDatafeedSubscriptionRequest":{ "type":"structure", "members":{ @@ -20773,7 +21024,7 @@ }, "Cidr":{ "shape":"String", - "documentation":"

    The CIDR you want to deprovision from the pool. Enter the CIDR you want to deprovision with a netmask of /32. You must rerun this command for each IP address in the CIDR range. If your CIDR is a /24, you will have to run this command to deprovision each of the 256 IP addresses in the /24 CIDR.

    " + "documentation":"

    The CIDR you want to deprovision from the pool.

    " } } }, @@ -20807,6 +21058,10 @@ "shape":"ImageId", "documentation":"

    The ID of the AMI.

    " }, + "DeleteAssociatedSnapshots":{ + "shape":"Boolean", + "documentation":"

    Specifies whether to delete the snapshots associated with the AMI during deregistration.

    If a snapshot is associated with multiple AMIs, it is not deleted, regardless of this setting.

    Default: The snapshots are not deleted.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -20818,7 +21073,17 @@ "DeregisterImageResult":{ "type":"structure", "members":{ -} + "Return":{ + "shape":"Boolean", + "documentation":"

    Returns true if the request succeeds; otherwise, it returns an error.

    ", + "locationName":"return" + }, + "DeleteSnapshotResults":{ + "shape":"DeleteSnapshotResultSet", + "documentation":"

    The deletion result for each snapshot associated with the AMI, including the snapshot ID and its success or error code.

    ", + "locationName":"deleteSnapshotResultSet" + } + } }, "DeregisterInstanceEventNotificationAttributesRequest":{ "type":"structure", @@ -23551,7 +23816,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

    • bare-metal - Indicates whether it is a bare metal instance type (true | false).

    • burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false).

    • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

    • dedicated-hosts-supported - Indicates whether the instance type supports Dedicated Hosts. (true | false)

    • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

    • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

    • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

    • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

    • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

    • hypervisor - The hypervisor (nitro | xen).

    • instance-storage-info.disk.count - The number of local disks.

    • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

    • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

    • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

    • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

    • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

    • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

    • instance-type - The instance type (for example c5.2xlarge or c5*).

    • memory-info.size-in-mib - The memory size.

    • network-info.bandwidth-weightings - For instances that support bandwidth weighting to boost performance (default, vpc-1, ebs-1).

    • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

    • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

    • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

    • network-info.flexible-ena-queues-support - Indicates whether an instance supports flexible ENA queues (supported | unsupported).

    • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

    • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

    • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

    • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

    • network-info.maximum-network-cards - The maximum number of network cards per instance.

    • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

    • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

    • nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported).

    • nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported).

    • nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0).

    • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

    • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

    • processor-info.supported-features - The supported CPU features (amd-sev-snp).

    • supported-boot-mode - The boot mode (legacy-bios | uefi).

    • supported-root-device-type - The root device type (ebs | instance-store).

    • supported-usage-class - The usage class (on-demand | spot | capacity-block).

    • supported-virtualization-type - The virtualization type (hvm | paravirtual).

    • vcpu-info.default-cores - The default number of cores for the instance type.

    • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

    • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

    • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

    • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

    ", + "documentation":"

    One or more filters. Filter names and values are case-sensitive.

    • auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

    • bare-metal - Indicates whether it is a bare metal instance type (true | false).

    • burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false).

    • current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false).

    • dedicated-hosts-supported - Indicates whether the instance type supports Dedicated Hosts. (true | false)

    • ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps.

    • ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type.

    • ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s.

    • ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default).

    • ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported).

    • ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported).

    • free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false).

    • hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

    • hypervisor - The hypervisor (nitro | xen).

    • instance-storage-info.disk.count - The number of local disks.

    • instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB.

    • instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd).

    • instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported).

    • instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported).

    • instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB.

    • instance-storage-supported - Indicates whether the instance type has local instance storage (true | false).

    • instance-type - The instance type (for example c5.2xlarge or c5*).

    • memory-info.size-in-mib - The memory size.

    • network-info.bandwidth-weightings - For instances that support bandwidth weighting to boost performance (default, vpc-1, ebs-1).

    • network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance.

    • network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false).

    • network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported).

    • network-info.flexible-ena-queues-support - Indicates whether an instance supports flexible ENA queues (supported | unsupported).

    • network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false).

    • network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface.

    • network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface.

    • network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

    • network-info.maximum-network-cards - The maximum number of network cards per instance.

    • network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

    • network-info.network-performance - The network performance (for example, \"25 Gigabit\").

    • nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported).

    • nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported).

    • nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0).

    • processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64).

    • processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

    • processor-info.supported-features - The supported CPU features (amd-sev-snp).

    • reboot-migration-support - Indicates whether enabling reboot migration is supported (supported | unsupported).

    • supported-boot-mode - The boot mode (legacy-bios | uefi).

    • supported-root-device-type - The root device type (ebs | instance-store).

    • supported-usage-class - The usage class (on-demand | spot | capacity-block).

    • supported-virtualization-type - The virtualization type (hvm | paravirtual).

    • vcpu-info.default-cores - The default number of cores for the instance type.

    • vcpu-info.default-threads-per-core - The default number of threads per core for the instance type.

    • vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

    • vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

    • vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, \"1\" or \"1,2\".

    ", "locationName":"Filter" }, "MaxResults":{ @@ -24498,6 +24763,53 @@ } } }, + "DescribeMacModificationTasksMaxResults":{ + "type":"integer", + "max":500, + "min":1 + }, + "DescribeMacModificationTasksRequest":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

    Specifies one or more filters for the request:

    • instance-id - The ID of the instance for which the task was created.

    • task-state - The state of the task (successful | failed | in-progress | pending).

    • mac-system-integrity-protection-configuration.sip-status - The overall SIP state requested in the task (enabled | disabled).

    • start-time - The date and time the task was created.

    • task-type - The type of task (sip-modification | volume-ownership-delegation).

    ", + "locationName":"Filter" + }, + "MacModificationTaskIds":{ + "shape":"MacModificationTaskIdList", + "documentation":"

    The ID of the task.

    ", + "locationName":"MacModificationTaskId" + }, + "MaxResults":{ + "shape":"DescribeMacModificationTasksMaxResults", + "documentation":"

    The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 1 and 500. If maxResults is given a larger value than 500, you receive an error.

    " + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token to use to retrieve the next page of results.

    " + } + } + }, + "DescribeMacModificationTasksResult":{ + "type":"structure", + "members":{ + "MacModificationTasks":{ + "shape":"MacModificationTaskList", + "documentation":"

    Information about the tasks.

    ", + "locationName":"macModificationTaskSet" + }, + "NextToken":{ + "shape":"String", + "documentation":"

    The token to use to retrieve the next page of results. This value is null when there are no more results to return.

    ", + "locationName":"nextToken" + } + } + }, "DescribeManagedPrefixListsRequest":{ "type":"structure", "members":{ @@ -25007,7 +25319,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

    • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

    • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface (IPv4).

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

    • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efa-only | efs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • operator.managed - A Boolean that indicates whether this is a managed network interface.

    • operator.principal - The principal that manages the network interface. Only valid for managed network interfaces, where managed is true.

    • owner-id - The Amazon Web Services account ID of the network interface owner.

    • private-dns-name - The private DNS name of the network interface (IPv4).

    • private-ip-address - The private IPv4 address or addresses of the network interface.

    • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • vpc-id - The ID of the VPC for the network interface.

    ", + "documentation":"

    One or more filters.

    • association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface.

    • association.association-id - The association ID returned when the network interface was associated with an IPv4 address.

    • addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    • addresses.association.public-ip - The association ID returned when the network interface was associated with the Elastic IP address (IPv4).

    • addresses.primary - Whether the private IPv4 address is the primary IP address associated with the network interface.

    • addresses.private-ip-address - The private IPv4 addresses associated with the network interface.

    • association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface.

    • association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface.

    • association.public-dns-name - The public DNS name for the network interface (IPv4).

    • attachment.attach-time - The time that the network interface was attached to an instance.

    • attachment.attachment-id - The ID of the interface attachment.

    • attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    • attachment.device-index - The device index to which the network interface is attached.

    • attachment.instance-id - The ID of the instance to which the network interface is attached.

    • attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    • attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    • availability-zone - The Availability Zone of the network interface.

    • description - The description of the network interface.

    • group-id - The ID of a security group associated with the network interface.

    • ipv6-addresses.ipv6-address - An IPv6 address associated with the network interface.

    • interface-type - The type of network interface (api_gateway_managed | aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | efa | efa-only | efs | evs | gateway_load_balancer | gateway_load_balancer_endpoint | global_accelerator_managed | interface | iot_rules_managed | lambda | load_balancer | nat_gateway | network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint).

    • mac-address - The MAC address of the network interface.

    • network-interface-id - The ID of the network interface.

    • operator.managed - A Boolean that indicates whether this is a managed network interface.

    • operator.principal - The principal that manages the network interface. Only valid for managed network interfaces, where managed is true.

    • owner-id - The Amazon Web Services account ID of the network interface owner.

    • private-dns-name - The private DNS name of the network interface (IPv4).

    • private-ip-address - The private IPv4 address or addresses of the network interface.

    • requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    • requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    • source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    • status - The status of the network interface. If the network interface is not attached to an instance, the status is available; if a network interface is attached to an instance the status is in-use.

    • subnet-id - The ID of the subnet for the network interface.

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • vpc-id - The ID of the VPC for the network interface.

    ", "locationName":"filter" } }, @@ -25355,7 +25667,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", + "documentation":"

    One or more filters.

    • client-token - The idempotency token for the modification request.

    • create-date - The time when the modification request was created.

    • effective-date - The time when the modification becomes effective.

    • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.

    • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.availability-zone-id - The ID of the Availability Zone for the new Reserved Instances.

    • modification-result.target-configuration.instance-count - The number of new Reserved Instances.

    • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.

    • reserved-instances-id - The ID of the Reserved Instances modified.

    • reserved-instances-modification-id - The ID of the modification request.

    • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).

    • status-message - The reason for the status.

    • update-date - The time when the modification request was last updated.

    ", "locationName":"Filter" } }, @@ -25382,7 +25694,7 @@ "members":{ "AvailabilityZone":{ "shape":"String", - "documentation":"

    The Availability Zone in which the Reserved Instance can be used.

    " + "documentation":"

    The Availability Zone in which the Reserved Instance can be used.

    Either AvailabilityZone or AvailabilityZoneId can be specified, but not both.

    " }, "IncludeMarketplace":{ "shape":"Boolean", @@ -25417,6 +25729,10 @@ "documentation":"

    One or more Reserved Instances offering IDs.

    ", "locationName":"ReservedInstancesOfferingId" }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

    The ID of the Availability Zone.

    Either AvailabilityZone or AvailabilityZoneId can be specified, but not both.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -25424,7 +25740,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both Amazon Web Services and the Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • scope - The scope of the Reserved Instance (Availability Zone or Region).

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "documentation":"

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • availability-zone-id - The ID of the Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both Amazon Web Services and the Reserved Instance Marketplace are listed.

    • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).

    • reserved-instances-offering-id - The Reserved Instances offering ID.

    • scope - The scope of the Reserved Instance (Availability Zone or Region).

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", "locationName":"Filter" }, "InstanceTenancy":{ @@ -25485,7 +25801,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • scope - The scope of the Reserved Instance (Region or Availability Zone).

    • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", + "documentation":"

    One or more filters.

    • availability-zone - The Availability Zone where the Reserved Instance can be used.

    • availability-zone-id - The ID of the Availability Zone where the Reserved Instance can be used.

    • duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    • end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    • instance-type - The instance type that is covered by the reservation.

    • scope - The scope of the Reserved Instance (Region or Availability Zone).

    • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).

    • reserved-instances-id - The ID of the Reserved Instance.

    • start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    • state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    • tag:<key> - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    ", "locationName":"Filter" }, "OfferingType":{ @@ -30103,6 +30419,11 @@ "documentation":"

    The ARN of the Outpost on which the snapshot is stored.

    This parameter is not supported when using CreateImage.

    ", "locationName":"outpostArn" }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

    The Availability Zone where the EBS volume will be created (for example, us-east-1a).

    Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. If neither is specified, Amazon EC2 automatically selects an Availability Zone within the Region.

    This parameter is not supported when using CreateImage.

    ", + "locationName":"availabilityZone" + }, "Encrypted":{ "shape":"Boolean", "documentation":"

    Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.

    In no case can you remove encryption from an encrypted volume.

    Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types.

    This parameter is not returned by DescribeImageAttribute.

    For CreateImage and RegisterImage, whether you can include this parameter, and the allowed values differ depending on the type of block device mapping you are creating.

    • If you are creating a block device mapping for a new (empty) volume, you can include this parameter, and specify either true for an encrypted volume, or false for an unencrypted volume. If you omit this parameter, it defaults to false (unencrypted).

    • If you are creating a block device mapping from an existing encrypted or unencrypted snapshot, you must omit this parameter. If you include this parameter, the request will fail, regardless of the value that you specify.

    • If you are creating a block device mapping from an existing unencrypted volume, you can include this parameter, but you must specify false. If you specify true, the request will fail. In this case, we recommend that you omit the parameter.

    • If you are creating a block device mapping from an existing encrypted volume, you can include this parameter, and specify either true or false. However, if you specify false, the parameter is ignored and the block device mapping is always encrypted. In this case, we recommend that you omit the parameter.

    ", @@ -30111,6 +30432,10 @@ "VolumeInitializationRate":{ "shape":"Integer", "documentation":"

    Specifies the Amazon EBS Provisioned Rate for Volume Initialization (volume initialization rate), in MiB/s, at which to download the snapshot blocks from Amazon S3 to the volume. This is also known as volume initialization. Specifying a volume initialization rate ensures that the volume is initialized at a predictable and consistent rate after creation.

    This parameter is supported only for volumes created from snapshots. Omit this parameter if:

    • You want to create the volume using fast snapshot restore. You must specify a snapshot that is enabled for fast snapshot restore. In this case, the volume is fully initialized at creation.

      If you specify a snapshot that is enabled for fast snapshot restore and a volume initialization rate, the volume will be initialized at the specified rate instead of fast snapshot restore.

    • You want to create a volume that is initialized at the default rate.

    For more information, see Initialize Amazon EBS volumes in the Amazon EC2 User Guide.

    This parameter is not supported when using CreateImage.

    Valid range: 100 - 300 MiB/s

    " + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

    The ID of the Availability Zone where the EBS volume will be created (for example, use1-az1).

    Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. If neither is specified, Amazon EC2 automatically selects an Availability Zone within the Region.

    This parameter is not supported when using CreateImage.

    " } }, "documentation":"

    Describes a block device for an EBS volume.

    " @@ -31633,6 +31958,11 @@ "documentation":"

    The Availability Zones.

    ", "locationName":"availabilityZoneSet" }, + "AvailabilityZoneIds":{ + "shape":"ValueStringList", + "documentation":"

    The IDs of the Availability Zones.

    ", + "locationName":"availabilityZoneIdSet" + }, "Cidrs":{ "shape":"ValueStringList", "documentation":"

    The CIDR ranges.

    ", @@ -33596,6 +33926,37 @@ "type":"string", "enum":["ipsec.1"] }, + "GetActiveVpnTunnelStatusRequest":{ + "type":"structure", + "required":[ + "VpnConnectionId", + "VpnTunnelOutsideIpAddress" + ], + "members":{ + "VpnConnectionId":{ + "shape":"VpnConnectionId", + "documentation":"

    The ID of the VPN connection for which to retrieve the active tunnel status.

    " + }, + "VpnTunnelOutsideIpAddress":{ + "shape":"String", + "documentation":"

    The external IP address of the VPN tunnel for which to retrieve the active status.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request.

    " + } + } + }, + "GetActiveVpnTunnelStatusResult":{ + "type":"structure", + "members":{ + "ActiveVpnTunnelStatus":{ + "shape":"ActiveVpnTunnelStatus", + "documentation":"

    Information about the current security configuration of the VPN tunnel.

    ", + "locationName":"activeVpnTunnelStatus" + } + } + }, "GetAllowedImagesSettingsRequest":{ "type":"structure", "members":{ @@ -34301,6 +34662,10 @@ "NextToken":{ "shape":"String", "documentation":"

    The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

    " + }, + "Context":{ + "shape":"String", + "documentation":"

    Reserved.

    " } } }, @@ -35714,6 +36079,10 @@ "shape":"String", "documentation":"

    The IKE version to be used in the sample configuration file for your customer gateway device. You can specify one of the following versions: ikev1 or ikev2.

    " }, + "SampleType":{ + "shape":"String", + "documentation":"

    The type of sample configuration to generate. Valid values are \"compatibility\" (includes IKEv1) or \"recommended\" (throws UnsupportedOperationException for IKEv1).

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " @@ -36611,7 +36980,7 @@ }, "BootMode":{ "shape":"BootModeValues", - "documentation":"

    The boot mode of the image. For more information, see Boot modes in the Amazon EC2 User Guide.

    ", + "documentation":"

    The boot mode of the image. For more information, see Instance launch behavior with Amazon EC2 boot modes in the Amazon EC2 User Guide.

    ", "locationName":"bootMode" }, "TpmSupport":{ @@ -36651,12 +37020,12 @@ }, "SourceImageId":{ "shape":"String", - "documentation":"

    The ID of the source AMI from which the AMI was created.

    The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not appear if the AMI was created using any other API. For some older AMIs, the ID might not be available. For more information, see Identify the source AMI used to create a new AMI in the Amazon EC2 User Guide.

    ", + "documentation":"

    The ID of the source AMI from which the AMI was created.

    The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not appear if the AMI was created using any other API. For some older AMIs, the ID might not be available. For more information, see Identify the source AMI used to create a new Amazon EC2 AMI in the Amazon EC2 User Guide.

    ", "locationName":"sourceImageId" }, "SourceImageRegion":{ "shape":"String", - "documentation":"

    The Region of the source AMI.

    The Region only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The Region does not appear if the AMI was created using any other API. For some older AMIs, the Region might not be available. For more information, see Identify the source AMI used to create a new AMI in the Amazon EC2 User Guide.

    ", + "documentation":"

    The Region of the source AMI.

    The Region only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The Region does not appear if the AMI was created using any other API. For some older AMIs, the Region might not be available. For more information, see Identify the source AMI used to create a new Amazon EC2 AMI in the Amazon EC2 User Guide.

    ", "locationName":"sourceImageRegion" }, "ImageId":{ @@ -36757,7 +37126,7 @@ }, "UefiData":{ "shape":"AttributeValue", - "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot in the Amazon EC2 User Guide.

    ", + "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot for Amazon EC2 instances in the Amazon EC2 User Guide.

    ", "locationName":"uefiData" }, "LastLaunchedTime":{ @@ -38085,7 +38454,7 @@ }, "PublicDnsName":{ "shape":"String", - "documentation":"

    [IPv4 only] The public DNS name assigned to the instance. This name is not available until the instance enters the running state. This name is only available if you've enabled DNS hostnames for your VPC.

    ", + "documentation":"

    The public DNS name assigned to the instance. This name is not available until the instance enters the running state. This name is only available if you've enabled DNS hostnames for your VPC. The format of this name depends on the public hostname type.

    ", "locationName":"dnsName" }, "StateTransitionReason":{ @@ -38950,6 +39319,11 @@ "shape":"InstanceAutoRecoveryState", "documentation":"

    Provides information on the current automatic recovery behavior of your instance.

    ", "locationName":"autoRecovery" + }, + "RebootMigration":{ + "shape":"InstanceRebootMigrationState", + "documentation":"

    Specifies whether to attempt reboot migration during a user-initiated reboot of an instance that has a scheduled system-reboot event:

    • default - Amazon EC2 attempts to migrate the instance to new hardware (reboot migration). If successful, the system-reboot event is cleared. If unsuccessful, an in-place reboot occurs and the event remains scheduled.

    • disabled - Amazon EC2 keeps the instance on the same hardware (in-place reboot). The system-reboot event remains scheduled.

    This setting only applies to supported instances that have a scheduled reboot event. For more information, see Enable or disable reboot migration in the Amazon EC2 User Guide.

    ", + "locationName":"rebootMigration" } }, "documentation":"

    The maintenance options for the instance.

    " @@ -39214,7 +39588,7 @@ }, "InterfaceType":{ "shape":"String", - "documentation":"

    The type of network interface.

    Valid values: interface | efa | efa-only | trunk

    ", + "documentation":"

    The type of network interface.

    Valid values: interface | efa | efa-only | evs | trunk

    ", "locationName":"interfaceType" }, "Ipv4Prefixes":{ @@ -39499,6 +39873,13 @@ "locationName":"item" } }, + "InstanceRebootMigrationState":{ + "type":"string", + "enum":[ + "disabled", + "default" + ] + }, "InstanceRequirements":{ "type":"structure", "members":{ @@ -40980,7 +41361,62 @@ "p5en.48xlarge", "f2.12xlarge", "f2.48xlarge", - "trn2.48xlarge" + "trn2.48xlarge", + "c7i-flex.12xlarge", + "c7i-flex.16xlarge", + "m7i-flex.12xlarge", + "m7i-flex.16xlarge", + "i7ie.metal-24xl", + "i7ie.metal-48xl", + "i8g.48xlarge", + "c8gd.medium", + "c8gd.large", + "c8gd.xlarge", + "c8gd.2xlarge", + "c8gd.4xlarge", + "c8gd.8xlarge", + "c8gd.12xlarge", + "c8gd.16xlarge", + "c8gd.24xlarge", + "c8gd.48xlarge", + "c8gd.metal-24xl", + "c8gd.metal-48xl", + "i7i.large", + "i7i.xlarge", + "i7i.2xlarge", + "i7i.4xlarge", + "i7i.8xlarge", + "i7i.12xlarge", + "i7i.16xlarge", + "i7i.24xlarge", + "i7i.48xlarge", + "i7i.metal-24xl", + "i7i.metal-48xl", + "p6-b200.48xlarge", + "m8gd.medium", + "m8gd.large", + "m8gd.xlarge", + "m8gd.2xlarge", + "m8gd.4xlarge", + "m8gd.8xlarge", + "m8gd.12xlarge", + "m8gd.16xlarge", + "m8gd.24xlarge", + "m8gd.48xlarge", + "m8gd.metal-24xl", + "m8gd.metal-48xl", + "r8gd.medium", + "r8gd.large", + "r8gd.xlarge", + "r8gd.2xlarge", + "r8gd.4xlarge", + "r8gd.8xlarge", + "r8gd.12xlarge", + "r8gd.16xlarge", + "r8gd.24xlarge", + "r8gd.48xlarge", + "r8gd.metal-24xl", + "r8gd.metal-48xl" ] }, "InstanceTypeHypervisor":{ @@ -41142,6 +41578,11 @@ "shape":"PhcSupport", "documentation":"

    Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC) is supported.

    ", "locationName":"phcSupport" + }, + "RebootMigrationSupport":{ + "shape":"RebootMigrationSupport", + "documentation":"

    Indicates whether reboot migration during a user-initiated reboot is supported for instances that have a scheduled system-reboot event. For more information, see Enable or disable reboot migration in the Amazon EC2 User Guide.

    ", + "locationName":"rebootMigrationSupport" } }, "documentation":"

    Describes the instance type.

    " @@ -45729,6 +46170,78 @@ "locationName":"item" } }, + "MacModificationTask":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

    The ID of the Amazon EC2 Mac instance.

    ", + "locationName":"instanceId" + }, + "MacModificationTaskId":{ + "shape":"MacModificationTaskId", + "documentation":"

    The ID of the task.

    ", + "locationName":"macModificationTaskId" + }, + "MacSystemIntegrityProtectionConfig":{ + "shape":"MacSystemIntegrityProtectionConfiguration", + "documentation":"

    [SIP modification tasks only] Information about the SIP configuration.

    ", + "locationName":"macSystemIntegrityProtectionConfig" + }, + "StartTime":{ + "shape":"MillisecondDateTime", + "documentation":"

    The date and time the task was created, in the UTC timezone (YYYY-MM-DDThh:mm:ss.sssZ).

    ", + "locationName":"startTime" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    The tags assigned to the task.

    ", + "locationName":"tagSet" + }, + "TaskState":{ + "shape":"MacModificationTaskState", + "documentation":"

    The state of the task.

    ", + "locationName":"taskState" + }, + "TaskType":{ + "shape":"MacModificationTaskType", + "documentation":"

    The type of task.

    ", + "locationName":"taskType" + } + }, + "documentation":"

    Information about a System Integrity Protection (SIP) modification task or volume ownership delegation task for an Amazon EC2 Mac instance.

    " + }, + "MacModificationTaskId":{"type":"string"}, + "MacModificationTaskIdList":{ + "type":"list", + "member":{ + "shape":"MacModificationTaskId", + "locationName":"item" + } + }, + "MacModificationTaskList":{ + "type":"list", + "member":{ + "shape":"MacModificationTask", + "locationName":"item" + } + }, + "MacModificationTaskState":{ + "type":"string", + "enum":[ + "successful", + "failed", + "in-progress", + "pending" + ] + }, + "MacModificationTaskType":{ + "type":"string", + "enum":[ + "sip-modification", + "volume-ownership-delegation" + ] + }, "MacOSVersionStringList":{ "type":"list", "member":{ @@ -45736,6 +46249,93 @@ "locationName":"item" } }, + "MacSystemIntegrityProtectionConfiguration":{ + "type":"structure", + "members":{ + "AppleInternal":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether Apple Internal was enabled or disabled by the task.

    ", + "locationName":"appleInternal" + }, + "BaseSystem":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether Base System was enabled or disabled by the task.

    ", + "locationName":"baseSystem" + }, + "DebuggingRestrictions":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether Debugging Restrictions was enabled or disabled by the task.

    ", + "locationName":"debuggingRestrictions" + }, + "DTraceRestrictions":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether DTrace Restrictions was enabled or disabled by the task.

    ", + "locationName":"dTraceRestrictions" + }, + "FilesystemProtections":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether Filesystem Protections was enabled or disabled by the task.

    ", + "locationName":"filesystemProtections" + }, + "KextSigning":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether Kext Signing was enabled or disabled by the task.

    ", + "locationName":"kextSigning" + }, + "NvramProtections":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether NVRAM Protections was enabled or disabled by the task.

    ", + "locationName":"nvramProtections" + }, + "Status":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Indicates whether SIP was enabled or disabled by the task.

    ", + "locationName":"status" + } + }, + "documentation":"

    Describes the configuration for a System Integrity Protection (SIP) modification task.

    " + }, + "MacSystemIntegrityProtectionConfigurationRequest":{ + "type":"structure", + "members":{ + "AppleInternal":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables Apple Internal.

    " + }, + "BaseSystem":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables Base System.

    " + }, + "DebuggingRestrictions":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables Debugging Restrictions.

    " + }, + "DTraceRestrictions":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables DTrace Restrictions.

    " + }, + "FilesystemProtections":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables Filesystem Protections.

    " + }, + "KextSigning":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables Kext Signing.

    " + }, + "NvramProtections":{ + "shape":"MacSystemIntegrityProtectionSettingStatus", + "documentation":"

    Enables or disables NVRAM Protections.

    " + } + }, + "documentation":"

    Describes a custom configuration for a System Integrity Protection (SIP) modification task.

    " + }, + "MacSystemIntegrityProtectionSettingStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, "MaintenanceDetails":{ "type":"structure", "members":{ @@ -46864,6 +47464,10 @@ "shape":"InstanceAutoRecoveryState", "documentation":"

    Disables the automatic recovery behavior of your instance or sets it to default.

    " }, + "RebootMigration":{ + "shape":"InstanceRebootMigrationState", + "documentation":"

    Specifies whether to attempt reboot migration during a user-initiated reboot of an instance that has a scheduled system-reboot event:

    • default - Amazon EC2 attempts to migrate the instance to new hardware (reboot migration). If successful, the system-reboot event is cleared. If unsuccessful, an in-place reboot occurs and the event remains scheduled.

    • disabled - Amazon EC2 keeps the instance on the same hardware (in-place reboot). The system-reboot event remains scheduled.

    This setting only applies to supported instances that have a scheduled reboot event. For more information, see Enable or disable reboot migration in the Amazon EC2 User Guide.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " @@ -46882,6 +47486,11 @@ "shape":"InstanceAutoRecoveryState", "documentation":"

    Provides information on the current automatic recovery behavior of your instance.

    ", "locationName":"autoRecovery" + }, + "RebootMigration":{ + "shape":"InstanceRebootMigrationState", + "documentation":"

    Specifies whether to attempt reboot migration during a user-initiated reboot of an instance that has a scheduled system-reboot event:

    • default - Amazon EC2 attempts to migrate the instance to new hardware (reboot migration). If successful, the system-reboot event is cleared. If unsuccessful, an in-place reboot occurs and the event remains scheduled.

    • disabled - Amazon EC2 keeps the instance on the same hardware (in-place reboot). The system-reboot event remains scheduled.

    This setting only applies to supported instances that have a scheduled reboot event. For more information, see Enable or disable reboot migration in the Amazon EC2 User Guide.

    ", + "locationName":"rebootMigration" } } }, @@ -47434,6 +48043,11 @@ "shape":"Boolean", "documentation":"

    Indicates whether to assign a public IPv4 address to a network interface. This option can be enabled for any network interface but will only apply to the primary network interface (eth0).

    " }, + "AssociatedSubnetIds":{ + "shape":"SubnetIdList", + "documentation":"

    A list of subnet IDs to associate with the network interface.

    ", + "locationName":"AssociatedSubnetId" + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -47503,6 +48117,37 @@ } } }, + "ModifyPublicIpDnsNameOptionsRequest":{ + "type":"structure", + "required":[ + "NetworkInterfaceId", + "HostnameType" + ], + "members":{ + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

    A network interface ID.

    " + }, + "HostnameType":{ + "shape":"PublicIpDnsOption", + "documentation":"

    The public hostname type. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    • public-dual-stack-dns-name: A dual-stack public hostname for a network interface. Requests from within the VPC resolve to both the private IPv4 address and the IPv6 Global Unicast Address of the network interface. Requests from the internet resolve to both the public IPv4 and the IPv6 GUA address of the network interface.

    • public-ipv4-dns-name: An IPv4-enabled public hostname for a network interface. Requests from within the VPC resolve to the private primary IPv4 address of the network interface. Requests from the internet resolve to the public IPv4 address of the network interface.

    • public-ipv6-dns-name: An IPv6-enabled public hostname for a network interface. Requests from within the VPC or from the internet resolve to the IPv6 GUA of the network interface.

    " + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

    Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    " + } + } + }, + "ModifyPublicIpDnsNameOptionsResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"Boolean", + "documentation":"

    Whether or not the request was successful.

    ", + "locationName":"successful" + } + } + }, "ModifyReservedInstancesRequest":{ "type":"structure", "required":[ @@ -49200,6 +49845,10 @@ "SkipTunnelReplacement":{ "shape":"Boolean", "documentation":"

    Choose whether or not to trigger immediate tunnel replacement. This is only applicable when turning on or off EnableTunnelLifecycleControl.

    Valid values: True | False

    " + }, + "PreSharedKeyStorage":{ + "shape":"String", + "documentation":"

    Specifies the storage mode for the pre-shared key (PSK). Valid values are Standard (stored in Site-to-Site VPN service) or SecretsManager (stored in Amazon Web Services Secrets Manager).

    " } } }, @@ -50419,9 +51068,19 @@ }, "PrivateDnsName":{ "shape":"String", - "documentation":"

    The private DNS name.

    ", + "documentation":"

    The private hostname. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    ", "locationName":"privateDnsName" }, + "PublicDnsName":{ + "shape":"String", + "documentation":"

    A public hostname. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    ", + "locationName":"publicDnsName" + }, + "PublicIpDnsNameOptions":{ + "shape":"PublicIpDnsNameOptions", + "documentation":"

    Public hostname type options. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    ", + "locationName":"publicIpDnsNameOptions" + }, "PrivateIpAddress":{ "shape":"String", "documentation":"

    The IPv4 address of the network interface within the subnet.

    ", @@ -50496,6 +51155,11 @@ "shape":"OperatorResponse", "documentation":"

    The service provider that manages the network interface.

    ", "locationName":"operator" + }, + "AssociatedSubnets":{ + "shape":"AssociatedSubnetList", + "documentation":"

    The subnets associated with this network interface.

    ", + "locationName":"associatedSubnetSet" } }, "documentation":"

    Describes a network interface.

    " @@ -50694,6 +51358,11 @@ "documentation":"

    The IPv6 address.

    ", "locationName":"ipv6Address" }, + "PublicIpv6DnsName":{ + "shape":"String", + "documentation":"

    An IPv6-enabled public hostname for a network interface. Requests from within the VPC or from the internet resolve to the IPv6 GUA of the network interface. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    ", + "locationName":"publicIpv6DnsName" + }, "IsPrimaryIpv6":{ "shape":"Boolean", "documentation":"

    Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute.

    ", @@ -51014,6 +51683,7 @@ "locationName":"item" } }, + "OdbNetworkArn":{"type":"string"}, "OfferingClassType":{ "type":"string", "enum":[ @@ -52916,6 +53586,40 @@ "documentation":"

    The status of an updated pointer (PTR) record for an Elastic IP address.

    " }, "PublicIpAddress":{"type":"string"}, + "PublicIpDnsNameOptions":{ + "type":"structure", + "members":{ + "DnsHostnameType":{ + "shape":"String", + "documentation":"

    The public hostname type. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    ", + "locationName":"dnsHostnameType" + }, + "PublicIpv4DnsName":{ + "shape":"String", + "documentation":"

    An IPv4-enabled public hostname for a network interface. Requests from within the VPC resolve to the private primary IPv4 address of the network interface. Requests from the internet resolve to the public IPv4 address of the network interface.

    ", + "locationName":"publicIpv4DnsName" + }, + "PublicIpv6DnsName":{ + "shape":"String", + "documentation":"

    An IPv6-enabled public hostname for a network interface. Requests from within the VPC or from the internet resolve to the IPv6 GUA of the network interface.

    ", + "locationName":"publicIpv6DnsName" + }, + "PublicDualStackDnsName":{ + "shape":"String", + "documentation":"

    A dual-stack public hostname for a network interface. Requests from within the VPC resolve to both the private IPv4 address and the IPv6 Global Unicast Address of the network interface. Requests from the internet resolve to both the public IPv4 and the IPv6 GUA address of the network interface.

    ", + "locationName":"publicDualStackDnsName" + } + }, + "documentation":"

    Public hostname type options. For more information, see EC2 instance hostnames, DNS names, and domains in the Amazon EC2 User Guide.

    " + }, + "PublicIpDnsOption":{ + "type":"string", + "enum":[ + "public-dual-stack-dns-name", + "public-ipv4-dns-name", + "public-ipv6-dns-name" + ] + }, "PublicIpStringList":{ "type":"list", "member":{ @@ -53339,6 +54043,13 @@ } } }, + "RebootMigrationSupport":{ + "type":"string", + "enum":[ + "unsupported", + "supported" + ] + }, "RecurringCharge":{ "type":"structure", "members":{ @@ -53472,16 +54183,16 @@ "members":{ "ImageLocation":{ "shape":"String", - "documentation":"

    The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the aws-exec-read canned access control list (ACL) to ensure that it can be accessed by Amazon EC2. For more information, see Canned ACLs in the Amazon S3 Service Developer Guide.

    " + "documentation":"

    The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the aws-exec-read canned access control list (ACL) to ensure that it can be accessed by Amazon EC2. For more information, see Canned ACL in the Amazon S3 Service Developer Guide.

    " }, "BillingProducts":{ "shape":"BillingProductList", - "documentation":"

    The billing product codes. Your account must be authorized to specify billing product codes.

    If your account is not authorized to specify billing product codes, you can publish AMIs that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller on the Amazon Web Services Marketplace. For more information, see Getting started as a seller and AMI-based products in the Amazon Web Services Marketplace Seller Guide.

    ", + "documentation":"

    The billing product codes. Your account must be authorized to specify billing product codes.

    If your account is not authorized to specify billing product codes, you can publish AMIs that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller on the Amazon Web Services Marketplace. For more information, see Getting started as an Amazon Web Services Marketplace seller and AMI-based products in Amazon Web Services Marketplace in the Amazon Web Services Marketplace Seller Guide.

    ", "locationName":"BillingProduct" }, "BootMode":{ "shape":"BootModeValues", - "documentation":"

    The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

    The operating system contained in the AMI must be configured to support the specified boot mode.

    For more information, see Boot modes in the Amazon EC2 User Guide.

    " + "documentation":"

    The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

    The operating system contained in the AMI must be configured to support the specified boot mode.

    For more information, see Instance launch behavior with Amazon EC2 boot modes in the Amazon EC2 User Guide.

    " }, "TpmSupport":{ "shape":"TpmSupportValues", @@ -53489,7 +54200,7 @@ }, "UefiData":{ "shape":"StringType", - "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot in the Amazon EC2 User Guide.

    " + "documentation":"

    Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure Boot for Amazon EC2 instances in the Amazon EC2 User Guide.

    " }, "ImdsSupport":{ "shape":"ImdsSupportValues", @@ -53537,7 +54248,7 @@ }, "BlockDeviceMappings":{ "shape":"BlockDeviceMappingRequestList", - "documentation":"

    The block device mapping entries.

    If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

    If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

    ", + "documentation":"

    The block device mapping entries.

    If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

    If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, see Create AMIs from local snapshots in the Amazon EBS User Guide.

    ", "locationName":"BlockDeviceMapping" }, "VirtualizationType":{ @@ -54228,6 +54939,10 @@ "shape":"CoreNetworkArn", "documentation":"

    The Amazon Resource Name (ARN) of the core network.

    " }, + "OdbNetworkArn":{ + "shape":"OdbNetworkArn", + "documentation":"

    The Amazon Resource Name (ARN) of the ODB network.

    " + }, "DryRun":{ "shape":"Boolean", "documentation":"

    Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

    ", @@ -55093,6 +55808,11 @@ "documentation":"

    Any tags assigned to the resource.

    ", "locationName":"tagSet" }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" + }, "ReservedInstancesId":{ "shape":"String", "documentation":"

    The ID of the Reserved Instance.

    ", @@ -55178,6 +55898,11 @@ "shape":"scope", "documentation":"

    Whether the Reserved Instance is applied to instances in a Region or instances in a specific Availability Zone.

    ", "locationName":"scope" + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" } }, "documentation":"

    Describes the configuration settings for the modified Reserved Instances.

    " @@ -55410,6 +56135,11 @@ "documentation":"

    Whether the Reserved Instance is applied to instances in a Region or an Availability Zone.

    ", "locationName":"scope" }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" + }, "ReservedInstancesOfferingId":{ "shape":"String", "documentation":"

    The ID of the Reserved Instance offering. This is the offering ID used in GetReservedInstancesExchangeQuote to confirm that an exchange can be made.

    ", @@ -55789,7 +56519,8 @@ "ipam-resource-discovery-association", "instance-connect-endpoint", "verified-access-endpoint-target", - "ipam-external-resource-verification-token" + "ipam-external-resource-verification-token", + "mac-modification-task" ] }, "ResponseError":{ @@ -56572,6 +57303,11 @@ "shape":"CoreNetworkArn", "documentation":"

    The Amazon Resource Name (ARN) of the core network.

    ", "locationName":"coreNetworkArn" + }, + "OdbNetworkArn":{ + "shape":"OdbNetworkArn", + "documentation":"

    The Amazon Resource Name (ARN) of the ODB network.

    ", + "locationName":"odbNetworkArn" } }, "documentation":"

    Describes a route in a route table.

    " @@ -58692,6 +59428,10 @@ } } }, + "SensitiveMacCredentials":{ + "type":"string", + "sensitive":true + }, "SensitiveUrl":{ "type":"string", "sensitive":true @@ -59435,6 +60175,16 @@ "locationName":"item" } }, + "SnapshotReturnCodes":{ + "type":"string", + "enum":[ + "success", + "skipped", + "missing-permissions", + "internal-error", + "client-error" + ] + }, "SnapshotSet":{ "type":"list", "member":{ @@ -60898,6 +61648,11 @@ "documentation":"

    The state of VPC Block Public Access (BPA).

    ", "locationName":"blockPublicAccessStates" }, + "Type":{ + "shape":"String", + "documentation":"

    Indicates whether this is a subnet used with Amazon Elastic VMware Service (EVS). Possible values are Elastic VMware Service or no value. For more information about Amazon EVS, see Amazon Elastic VMware Service API Reference.

    ", + "locationName":"type" + }, "SubnetId":{ "shape":"String", "documentation":"

    The ID of the subnet.

    ", @@ -60905,7 +61660,7 @@ }, "State":{ "shape":"SubnetState", - "documentation":"

    The current state of the subnet.

    ", + "documentation":"

    The current state of the subnet.

    • failed: The underlying infrastructure to support the subnet failed to provision as expected.

    • failed-insufficient-capacity: The underlying infrastructure to support the subnet failed to provision due to a shortage of EC2 instance capacity.

    ", "locationName":"state" }, "VpcId":{ @@ -61074,6 +61829,13 @@ } }, "SubnetId":{"type":"string"}, + "SubnetIdList":{ + "type":"list", + "member":{ + "shape":"SubnetId", + "locationName":"AssociatedSubnetId" + } + }, "SubnetIdStringList":{ "type":"list", "member":{ @@ -61154,7 +61916,9 @@ "enum":[ "pending", "available", - "unavailable" + "unavailable", + "failed", + "failed-insufficient-capacity" ] }, "Subscription":{ @@ -66188,6 +66952,11 @@ "shape":"VolumeStatusAttachmentStatusList", "documentation":"

    Information about the instances to which the volume is attached.

    ", "locationName":"attachmentStatuses" + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

    The ID of the Availability Zone.

    ", + "locationName":"availabilityZoneId" } }, "documentation":"

    Describes the volume status.

    " @@ -67247,6 +68016,11 @@ "documentation":"

    Information about the VPN tunnel.

    ", "locationName":"vgwTelemetry" }, + "PreSharedKeyArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the Secrets Manager secret storing the pre-shared key(s) for the VPN connection.

    ", + "locationName":"preSharedKeyArn" + }, "VpnConnectionId":{ "shape":"String", "documentation":"

    The ID of the VPN connection.

    ", @@ -67662,6 +68436,14 @@ "type":"list", "member":{"shape":"VpnTunnelOptionsSpecification"} }, + "VpnTunnelProvisioningStatus":{ + "type":"string", + "enum":[ + "available", + "pending", + "failed" + ] + }, "WeekDay":{ "type":"string", "enum":[ diff --git a/services/ec2instanceconnect/pom.xml b/services/ec2instanceconnect/pom.xml index bb767fce9444..d04ee5b4df83 100644 --- a/services/ec2instanceconnect/pom.xml +++ b/services/ec2instanceconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ec2instanceconnect AWS Java SDK :: Services :: EC2 Instance Connect diff --git a/services/ecr/pom.xml b/services/ecr/pom.xml index 4c3ab2f8f260..a331e3e6b4fa 100644 --- a/services/ecr/pom.xml +++ b/services/ecr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ecr AWS Java SDK :: Services :: Amazon EC2 Container Registry diff --git a/services/ecr/src/main/resources/codegen-resources/customization.config b/services/ecr/src/main/resources/codegen-resources/customization.config index 09955317f32a..7bc9d5f27c4e 100644 --- a/services/ecr/src/main/resources/codegen-resources/customization.config +++ b/services/ecr/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "describeRepositories", "getAuthorizationToken" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ecr/src/main/resources/codegen-resources/service-2.json b/services/ecr/src/main/resources/codegen-resources/service-2.json index 2c5424fb7749..84695aafe0fd 100644 --- a/services/ecr/src/main/resources/codegen-resources/service-2.json +++ b/services/ecr/src/main/resources/codegen-resources/service-2.json @@ -304,7 +304,7 @@ {"shape":"RepositoryNotFoundException"}, {"shape":"ImageNotFoundException"} ], - "documentation":"

    Returns metadata about the images in a repository.

    Beginning with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size, so it may return a larger image size than the image sizes returned by DescribeImages.

    " + "documentation":"

    Returns metadata about the images in a repository.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might return a larger image than the image shown in the Amazon Web Services Management Console.

    The new version of Amazon ECR Basic Scanning doesn't use the ImageDetail$imageScanFindingsSummary and ImageDetail$imageScanStatus attributes from the API response to return scan results. Use the DescribeImageScanFindings API instead. For more information about Amazon Web Services native basic scanning, see Scan images for software vulnerabilities in Amazon ECR.

    " }, "DescribePullThroughCacheRules":{ "name":"DescribePullThroughCacheRules", @@ -918,6 +918,14 @@ "shape":"Date", "documentation":"

    The date and time the Amazon ECR container image was pushed.

    " }, + "lastInUseAt":{ + "shape":"Date", + "documentation":"

    The most recent date and time a cluster was running the image.

    " + }, + "inUseCount":{ + "shape":"InUseCount", + "documentation":"

    The number of Amazon ECS or Amazon EKS clusters currently running the image.

    " + }, "registry":{ "shape":"RegistryId", "documentation":"

    The registry the Amazon ECR container image belongs to.

    " @@ -1135,7 +1143,7 @@ }, "upstreamRegistryUrl":{ "shape":"Url", - "documentation":"

    The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

    • Amazon ECR (ecr) – dkr.ecr.<region>.amazonaws.com

    • Amazon ECR Public (ecr-public) – public.ecr.aws

    • Docker Hub (docker-hub) – registry-1.docker.io

    • GitHub Container Registry (github-container-registry) – ghcr.io

    • GitLab Container Registry (gitlab-container-registry) – registry.gitlab.com

    • Kubernetes (k8s) – registry.k8s.io

    • Microsoft Azure Container Registry (azure-container-registry) – <custom>.azurecr.io

    • Quay (quay) – quay.io

    " + "documentation":"

    The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry.

    • Amazon ECR (ecr) – <accountId>.dkr.ecr.<region>.amazonaws.com

    • Amazon ECR Public (ecr-public) – public.ecr.aws

    • Docker Hub (docker-hub) – registry-1.docker.io

    • GitHub Container Registry (github-container-registry) – ghcr.io

    • GitLab Container Registry (gitlab-container-registry) – registry.gitlab.com

    • Kubernetes (k8s) – registry.k8s.io

    • Microsoft Azure Container Registry (azure-container-registry) – <custom>.azurecr.io

    • Quay (quay) – quay.io

    " }, "registryId":{ "shape":"RegistryId", @@ -1459,8 +1467,7 @@ }, "DeleteRegistryPolicyRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRegistryPolicyResponse":{ "type":"structure", @@ -1729,8 +1736,7 @@ }, "DescribeRegistryRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeRegistryResponse":{ "type":"structure", @@ -2013,7 +2019,7 @@ "members":{ "authorizationData":{ "shape":"AuthorizationDataList", - "documentation":"

    A list of authorization token data objects that correspond to the registryIds values in the request.

    " + "documentation":"

    A list of authorization token data objects that correspond to the registryIds values in the request.

    The size of the authorization token returned by Amazon ECR is not fixed. We recommend that you don't make assumptions about the maximum size.

    " } } }, @@ -2073,7 +2079,7 @@ }, "maxResults":{ "shape":"LifecyclePreviewMaxResults", - "documentation":"

    The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a single page along with a nextToken
 response element. The remaining results of the initial request can be seen by sending
 another GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. This value can be between 1 and 1000. If this
 parameter is not used, then GetLifecyclePolicyPreviewRequest returns up to
 100 results and a nextToken value, if
 applicable. This option cannot be used when you specify images with imageIds.

    " + "documentation":"

    The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest in
 paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest only returns
 maxResults results in a single page along with a nextToken
 response element. The remaining results of the initial request can be seen by sending
 another GetLifecyclePolicyPreviewRequest request with the returned nextToken
 value. This value can be between 1 and 100. If this
 parameter is not used, then GetLifecyclePolicyPreviewRequest returns up to
100 results and a nextToken value, if
 applicable. This option cannot be used when you specify images with imageIds.

    " }, "filter":{ "shape":"LifecyclePolicyPreviewFilter", @@ -2151,8 +2157,7 @@ }, "GetRegistryPolicyRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetRegistryPolicyResponse":{ "type":"structure", @@ -2169,8 +2174,7 @@ }, "GetRegistryScanningConfigurationRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetRegistryScanningConfigurationResponse":{ "type":"structure", @@ -2282,7 +2286,7 @@ }, "imageSizeInBytes":{ "shape":"ImageSizeInBytes", - "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might return a larger image than the image sizes returned by DescribeImages.

    " + "documentation":"

    The size, in bytes, of the image in the repository.

    If the image is a manifest list, this will be the max size of all manifests in the list.

    Starting with Docker version 1.9, the Docker client compresses image layers before pushing them to a V2 Docker registry. The output of the docker images command shows the uncompressed image size. Therefore, Docker might return a larger image than the image shown in the Amazon Web Services Management Console.

    " }, "imagePushedAt":{ "shape":"PushTimestamp", @@ -2552,6 +2556,10 @@ "type":"list", "member":{"shape":"ImageTag"} }, + "InUseCount":{ + "type":"long", + "min":0 + }, "InitiateLayerUploadRequest":{ "type":"structure", "required":["repositoryName"], @@ -3627,7 +3635,7 @@ }, "imageTagMutability":{ "shape":"ImageTagMutability", - "documentation":"

    The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

    " + "documentation":"

    The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.

    " }, "repositoryPolicy":{ "shape":"RepositoryPolicyText", @@ -4129,8 +4137,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagStatus":{ "type":"string", @@ -4239,8 +4246,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdatePullThroughCacheRuleRequest":{ "type":"structure", diff --git a/services/ecrpublic/pom.xml b/services/ecrpublic/pom.xml index 094ca5951c74..fa6cbd19bb4a 100644 --- a/services/ecrpublic/pom.xml +++ b/services/ecrpublic/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ecrpublic AWS Java SDK :: Services :: ECR PUBLIC diff --git a/services/ecrpublic/src/main/resources/codegen-resources/customization.config b/services/ecrpublic/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ecrpublic/src/main/resources/codegen-resources/customization.config +++ b/services/ecrpublic/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ecs/pom.xml b/services/ecs/pom.xml index 4349c254a8b6..9a3f936952f8 100644 --- a/services/ecs/pom.xml +++ b/services/ecs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ecs AWS Java SDK :: Services :: Amazon EC2 Container Service diff --git a/services/ecs/src/main/resources/codegen-resources/customization.config b/services/ecs/src/main/resources/codegen-resources/customization.config index d938cd7baafd..bd024bf3aca1 100644 --- a/services/ecs/src/main/resources/codegen-resources/customization.config +++ b/services/ecs/src/main/resources/codegen-resources/customization.config @@ -16,6 +16,5 @@ "submitContainerStateChange", "submitTaskStateChange" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": 
true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ecs/src/main/resources/codegen-resources/service-2.json b/services/ecs/src/main/resources/codegen-resources/service-2.json index c30807c2a0ae..ae72bc17a9e6 100644 --- a/services/ecs/src/main/resources/codegen-resources/service-2.json +++ b/services/ecs/src/main/resources/codegen-resources/service-2.json @@ -67,7 +67,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"

    Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    There are two service scheduler strategies available:

    • REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

    If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.

    If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

    If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.

    When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

    When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide

    " + "documentation":"

    Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    There are two service scheduler strategies available:

    • REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

    You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

    If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy. If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy. The default value for minimum healthy percent is 100%.

    If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

    If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.

    When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

    When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide

    " }, "CreateTaskSet":{ "name":"CreateTaskSet", @@ -731,7 +731,7 @@ {"shape":"BlockedException"}, {"shape":"ConflictException"} ], - "documentation":"

    Starts a new task using the specified task definition.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

    Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

    To manage eventual consistency, you can do the following:

    • Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.

    • Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.

    If you get a ConflictException error, the RunTask request could not be processed due to conflicts. The provided clientToken is already in use with a different RunTask request. The resourceIds are the existing task ARNs which are already associated with the clientToken.

    To fix this issue:

    • Run RunTask with a unique clientToken.

    • Run RunTask with the clientToken and the original set of parameters

    If you get a ClientExceptionerror, the RunTask could not be processed because you use managed scaling and there is a capacity error because the quota of tasks in the PROVISIONING per cluster has been reached. For information about the service quotas, see Amazon ECS service quotas.

    " + "documentation":"

    Starts a new task using the specified task definition.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

    Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

    To manage eventual consistency, you can do the following:

    • Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.

    • Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.

    If you get a ConflictException error, the RunTask request could not be processed due to conflicts. The provided clientToken is already in use with a different RunTask request. The resourceIds are the existing task ARNs which are already associated with the clientToken.

    To fix this issue:

    • Run RunTask with a unique clientToken.

    • Run RunTask with the clientToken and the original set of parameters

    If you get a ClientException error, the RunTask could not be processed because you use managed scaling and there is a capacity error because the quota of tasks in the PROVISIONING per cluster has been reached. For information about the service quotas, see Amazon ECS service quotas.

    " }, "StartTask":{ "name":"StartTask", @@ -748,7 +748,7 @@ {"shape":"ClusterNotFoundException"}, {"shape":"UnsupportedFeatureException"} ], - "documentation":"

    Starts a new task from the specified task definition on the specified container instance or instances.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    Alternatively, you can useRunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    " + "documentation":"

    Starts a new task from the specified task definition on the specified container instance or instances.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    Amazon Elastic Inference (EI) is no longer available to customers.

    Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    " }, "StopServiceDeployment":{ "name":"StopServiceDeployment", @@ -970,7 +970,7 @@ {"shape":"NamespaceNotFoundException"}, {"shape":"UnsupportedFeatureException"} ], - "documentation":"

    Modifies the parameters of a service.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more infomation on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

    For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

    If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

    • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

    • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

      • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

      • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

    • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

    • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

    You must have a service-linked role when you update any of the following service properties:

    • loadBalancers,

    • serviceRegistries

    For more information about the role see the CreateService request parameter role .

    " + "documentation":"

    Modifies the parameters of a service.

    On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

    For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

    For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.

    You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

    You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

    If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

    If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

    You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

    • If minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount temporarily during a deployment. For example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

    • The maximumPercent parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).

    When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

    When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

    • Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.

    • By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.

      • Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.

      • Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.

    When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

    • Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.

    • Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.

    " }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -1362,7 +1362,7 @@ }, "configuration":{ "shape":"ClusterConfiguration", - "documentation":"

    The execute command configuration for the cluster.

    " + "documentation":"

    The execute command and managed storage configuration for the cluster.

    " }, "status":{ "shape":"String", @@ -1585,7 +1585,7 @@ }, "reason":{ "shape":"String", - "documentation":"

    A short (255 max characters) human-readable string to provide additional details about a running or stopped container.

    " + "documentation":"

    A short (1024 max characters) human-readable string to provide additional details about a running or stopped container.

    " }, "networkBindings":{ "shape":"NetworkBindings", @@ -1640,7 +1640,7 @@ }, "image":{ "shape":"String", - "documentation":"

    The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker container create command and the IMAGE parameter of docker run.

    • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.

    • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

    " + "documentation":"

    The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest . For images using tags (repository-url/image:tag), up to 255 characters total are allowed, including letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs (#). For images using digests (repository-url/image@digest), the 255 character limit applies only to the repository URL and image name (everything before the @ sign). The only supported hash function is sha256, and the hash value after sha256: must be exactly 64 characters (only letters A-F, a-f, and numbers 0-9 are allowed). This parameter maps to Image in the docker container create command and the IMAGE parameter of docker run.

    • When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.

    • Images in Amazon ECR repositories can be specified by either using the full registry/repository:tag or registry/repository@digest. For example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>:latest or 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.

    • Images in official repositories on Docker Hub use a single name (for example, ubuntu or mongo).

    • Images in other repositories on Docker Hub are qualified with an organization name (for example, amazon/amazon-ecs-agent).

    • Images in other online repositories are qualified further by a domain name (for example, quay.io/assemblyline/ubuntu).

    " }, "repositoryCredentials":{ "shape":"RepositoryCredentials", @@ -3520,7 +3520,7 @@ "documentation":"

    The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the startPeriod is off. This value applies only when you specify a command.

    If a health check succeeds within the startPeriod, then the container is considered healthy and any subsequent failures count toward the maximum number of retries.

    " } }, - "documentation":"

    An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

    The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

    You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

    The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

    Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

    The following describes the possible healthStatus values for a container:

    • HEALTHY-The container health check has passed successfully.

    • UNHEALTHY-The container health check has failed.

    • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

    The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

    • UNHEALTHY-One or more essential containers have failed their health check.

    • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

    • HEALTHY-All essential containers within the task have passed their health checks.

    Consider the following task health example with 2 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

    Consider the following task health example with 3 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

    If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

    The following are notes about container health check support:

    • If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.

    • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

    • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

    • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

    For an example of how to specify a task definition with multiple containers where container dependency is specified, see Container dependency in the Amazon Elastic Container Service Developer Guide.

    " + "documentation":"

    An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

    The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.

    You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.

    The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.

    Amazon ECS performs health checks on containers with the default that launched the container instance or the task.

    The following describes the possible healthStatus values for a container:

    • HEALTHY-The container health check has passed successfully.

    • UNHEALTHY-The container health check has failed.

    • UNKNOWN-The container health check is being evaluated, there's no container health check defined, or Amazon ECS doesn't have the health status of the container.

    The following describes the possible healthStatus values based on the container health checker status of essential containers in the task with the following priority order (high to low):

    • UNHEALTHY-One or more essential containers have failed their health check.

    • UNKNOWN-Any essential container running within the task is in an UNKNOWN state and no other essential containers have an UNHEALTHY state.

    • HEALTHY-All essential containers within the task have passed their health checks.

    Consider the following task health example with 2 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, the task health is HEALTHY.

    Consider the following task health example with 3 containers.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is UNHEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is UNHEALTHY.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is HEALTHY, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is UNKNOWN, and Container3 is UNKNOWN, the task health is UNKNOWN.

    • If Container1 is HEALTHY and Container2 is HEALTHY, and Container3 is HEALTHY, the task health is HEALTHY.

    If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.

    When a container health check fails for a task that is part of a service, the following process occurs:

    1. The task is marked as UNHEALTHY.

    2. The unhealthy task will be stopped, and during the stopping process, it will go through the following states:

      • DEACTIVATING - In this state, Amazon ECS performs additional steps before stopping the task. For example, for tasks that are part of services configured to use Elastic Load Balancing target groups, target groups will be deregistered in this state.

      • STOPPING - The task is in the process of being stopped.

      • DEPROVISIONING - Resources associated with the task are being cleaned up.

      • STOPPED - The task has been completely stopped.

    3. After the old task stops, a new task will be launched to ensure service operation, and the new task will go through the following lifecycle:

      • PROVISIONING - Resources required for the task are being provisioned.

      • PENDING - The task is waiting to be placed on a container instance.

      • ACTIVATING - In this state, Amazon ECS pulls container images, creates containers, configures task networking, registers load balancer target groups, and configures service discovery status.

      • RUNNING - The task is running and performing its work.

    For more detailed information about task lifecycle states, see Task lifecycle in the Amazon Elastic Container Service Developer Guide.

    The following are notes about container health check support:

    • If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't cause a container to transition to an UNHEALTHY status. This is by design, to ensure that containers remain running during agent restarts or temporary unavailability. The health check status is the \"last heard from\" response from the Amazon ECS agent, so if the container was considered HEALTHY prior to the disconnect, that status will remain until the agent reconnects and another health check occurs. There are no assumptions made about the status of the container health checks.

    • Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.

    • Container health checks are supported for Fargate tasks if you're using platform version 1.1.0 or greater. For more information, see Fargate platform versions.

    • Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.

    For an example of how to specify a task definition with multiple containers where container dependency is specified, see Container dependency in the Amazon Elastic Container Service Developer Guide.

    " }, "HealthStatus":{ "type":"string", @@ -4179,7 +4179,7 @@ }, "options":{ "shape":"LogConfigurationOptionsMap", - "documentation":"

    The configuration options to send to the log driver.

    The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

    awslogs-create-group

    Required: No

    Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

    Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

    awslogs-region

    Required: Yes

    Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

    awslogs-group

    Required: Yes

    Make sure to specify a log group that the awslogs log driver sends its log streams to.

    awslogs-stream-prefix

    Required: Yes, when using Fargate. Optional when using EC2.

    Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

    If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

    For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

    You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.

    awslogs-datetime-format

    Required: No

    This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.

    For more information, see awslogs-datetime-format.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    awslogs-multiline-pattern

    Required: No

    This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    For more information, see awslogs-multiline-pattern.

    This option is ignored if awslogs-datetime-format is also configured.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    The following options apply to all supported log drivers.

    mode

    Required: No

    Valid values: non-blocking | blocking

    This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted.

    If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.

    If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

    You can set a default mode for all containers in a specific Amazon Web Services Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide.

    max-buffer-size

    Required: No

    Default value: 1m

    When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.

    To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

    When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.

    Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

    When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

    When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

    When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.

    This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

    " + "documentation":"

    The configuration options to send to the log driver.

    The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

    awslogs-create-group

    Required: No

    Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

    Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

    awslogs-region

    Required: Yes

    Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

    awslogs-group

    Required: Yes

    Make sure to specify a log group that the awslogs log driver sends its log streams to.

    awslogs-stream-prefix

    Required: Yes, when using Fargate. Optional when using EC2.

    Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

    If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

    For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

    You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.

    awslogs-datetime-format

    Required: No

    This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.

    For more information, see awslogs-datetime-format.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    awslogs-multiline-pattern

    Required: No

    This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

    For more information, see awslogs-multiline-pattern.

    This option is ignored if awslogs-datetime-format is also configured.

    You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

    Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

    The following options apply to all supported log drivers.

    mode

    Required: No

    Valid values: non-blocking | blocking

    This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted.

    If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.

    If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

    You can set a default mode for all containers in a specific Amazon Web Services Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the non-blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide.

    On June 25, 2025, Amazon ECS changed the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

    • Set the mode option in your container definition's logConfiguration as blocking.

    • Set the defaultLogDriverMode account setting to blocking.

    max-buffer-size

    Required: No

    Default value: 1m

    When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.

    To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

    When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.

    Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

    When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

    When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

    When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.

    This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

    " }, "secretOptions":{ "shape":"SecretList", @@ -4328,11 +4328,11 @@ "members":{ "kmsKeyId":{ "shape":"String", - "documentation":"

    Specify a Key Management Service key ID to encrypt the managed storage.

    The key must be a single Region key.

    " + "documentation":"

    Specify a Key Management Service key ID to encrypt Amazon ECS managed storage.

    When you specify a kmsKeyId, Amazon ECS uses the key to encrypt data volumes managed by Amazon ECS that are attached to tasks in the cluster. The following data volumes are managed by Amazon ECS: Amazon EBS. For more information about encryption of Amazon EBS volumes attached to Amazon ECS tasks, see Encrypt data stored in Amazon EBS volumes for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

    The key must be a single Region key.

    " }, "fargateEphemeralStorageKmsKeyId":{ "shape":"String", - "documentation":"

    Specify the Key Management Service key ID for the Fargate ephemeral storage.

    The key must be a single Region key.

    " + "documentation":"

    Specify the Key Management Service key ID for Fargate ephemeral storage.

    When you specify a fargateEphemeralStorageKmsKeyId, Amazon Web Services Fargate uses the key to encrypt data at rest in ephemeral storage. For more information about Fargate ephemeral storage encryption, see Customer managed keys for Amazon Web Services Fargate ephemeral storage for Amazon ECS in the Amazon Elastic Container Service Developer Guide.

    The key must be a single Region key.

    " } }, "documentation":"

    The managed storage configuration for the cluster.

    " @@ -4468,6 +4468,8 @@ "WINDOWS_SERVER_2004_CORE", "WINDOWS_SERVER_2022_CORE", "WINDOWS_SERVER_2022_FULL", + "WINDOWS_SERVER_2025_CORE", + "WINDOWS_SERVER_2025_FULL", "WINDOWS_SERVER_20H2_CORE", "LINUX" ] @@ -4674,7 +4676,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The resource name for which to modify the account setting.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays these critical performance data in curated dashboards removing the heavy lifting in observability set-up.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The resource name for which to modify the account setting.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays these critical performance data in curated dashboards removing the heavy lifting in observability set-up.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateFIPSMode - If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

      On June 25, 2025, Amazon ECS changed the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

      • Set the mode option in your container definition's logConfiguration as blocking.

      • Set the defaultLogDriverMode account setting to blocking.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " }, "value":{ "shape":"String", @@ -4700,7 +4702,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

    The Amazon ECS account setting name to modify.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays these critical performance data in curated dashboards removing the heavy lifting in observability set-up.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " + "documentation":"

    The Amazon ECS account setting name to modify.

    The following are the valid values for the account setting name.

    • serviceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • taskLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • containerInstanceLongArnFormat - When modified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

    • awsvpcTrunking - When modified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched with the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    • containerInsights - Container Insights with enhanced observability provides all the Container Insights metrics, plus additional task and container metrics. This version supports enhanced observability for Amazon ECS clusters using the Amazon EC2 and Fargate launch types. After you configure Container Insights with enhanced observability on Amazon ECS, Container Insights auto-collects detailed infrastructure telemetry from the cluster level down to the container level in your environment and displays these critical performance data in curated dashboards removing the heavy lifting in observability set-up.

      To use Container Insights with enhanced observability, set the containerInsights account setting to enhanced.

      To use Container Insights, set the containerInsights account setting to enabled.

      For more information, see Monitor Amazon ECS containers using Container Insights with enhanced observability in the Amazon Elastic Container Service Developer Guide.

    • dualStackIPv6 - When turned on, when using a VPC in dual stack mode, your tasks using the awsvpc network mode can have an IPv6 address assigned. For more information on using IPv6 with tasks launched on Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 with tasks launched on Fargate, see Using a VPC in dual-stack mode.

    • fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

    • tagResourceAuthorization - Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

    • defaultLogDriverMode - Amazon ECS supports setting a default delivery mode of log messages from a container to the logDriver that you specify in the container's logConfiguration. The delivery mode affects application stability when the flow of logs from the container to the log driver is interrupted. The defaultLogDriverMode setting supports two values: blocking and non-blocking. If you don't specify a delivery mode in your container definition's logConfiguration, the mode you specify using this account setting will be used as the default. For more information about log delivery modes, see LogConfiguration.

      On June 25, 2025, Amazon ECS changed the default log driver mode from blocking to non-blocking to prioritize task availability over logging. To continue using the blocking mode after this change, do one of the following:

      • Set the mode option in your container definition's logConfiguration as blocking.

      • Set the defaultLogDriverMode account setting to blocking.

    • guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    " }, "value":{ "shape":"String", @@ -5688,11 +5690,11 @@ "members":{ "encrypted":{ "shape":"BoxedBoolean", - "documentation":"

    Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " + "documentation":"

    Indicates whether the volume should be encrypted. If you turn on Region-level Amazon EBS encryption by default but set this value as false, the setting is overridden and the volume is encrypted with the KMS key specified for Amazon EBS encryption by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " }, "kmsKeyId":{ "shape":"EBSKMSKeyId", - "documentation":"

    The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference.

    Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

    " + "documentation":"

    The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When a key is specified using this parameter, it overrides Amazon EBS default encryption or any KMS key that you specified for cluster-level managed storage encryption. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information about encrypting Amazon EBS volumes attached to tasks, see Encrypt data stored in Amazon EBS volumes attached to Amazon ECS tasks.

    Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

    " }, "volumeType":{ "shape":"EBSVolumeType", @@ -5704,7 +5706,11 @@ }, "snapshotId":{ "shape":"EBSSnapshotId", - "documentation":"

    The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " + "documentation":"

    The snapshot that Amazon ECS uses to create volumes for attachment to tasks maintained by the service. You must specify either snapshotId or sizeInGiB in your volume configuration. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " + }, + "volumeInitializationRate":{ + "shape":"BoxedInteger", + "documentation":"

    The rate, in MiB/s, at which data is fetched from a snapshot of an existing EBS volume to create new volumes for attachment to the tasks maintained by the service. This property can be specified only if you specify a snapshotId. For more information, see Initialize Amazon EBS volumes in the Amazon EBS User Guide.

    " }, "iops":{ "shape":"BoxedInteger", @@ -5724,7 +5730,7 @@ }, "filesystemType":{ "shape":"TaskFilesystemType", - "documentation":"

    The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.

    The available Linux filesystem types are
 ext3, ext4, and xfs. If no value is specified, the xfs filesystem type is used by default.

    The available Windows filesystem types are NTFS.

    " + "documentation":"

    The filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the tasks will fail to start.

    The available Linux filesystem types are
 ext3, ext4, and xfs. If no value is specified, the xfs filesystem type is used by default.

    The available Windows filesystem types are NTFS.

    " } }, "documentation":"

    The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service. For information about the supported launch types and operating systems, see Supported operating systems and launch types in the Amazon Elastic Container Service Developer Guide.

    Many of these parameters map 1:1 with the Amazon EBS CreateVolume API request parameters.

    " @@ -6246,7 +6252,7 @@ }, "value":{ "shape":"String", - "documentation":"

    The namespaced kernel parameter to set a value for.

    Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and Sysctls that start with \"fs.mqueue.*\"

    Valid network namespace values: Sysctls that start with \"net.*\"

    All of these values are supported by Fargate.

    " + "documentation":"

    The namespaced kernel parameter to set a value for.

    Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and Sysctls that start with \"fs.mqueue.*\"

    Valid network namespace values: Sysctls that start with \"net.*\". Only namespaced Sysctls that exist within the container starting with \"net.*\" are accepted.

    All of these values are supported by Fargate.

    " } }, "documentation":"

    A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the docker container create command and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

    We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. Doing this has the following disadvantages:

    • For tasks that use the awsvpc network mode including Fargate, if you set systemControls for any container, it applies to all containers in the task. If you set different systemControls for multiple containers in a single task, the container that's started last determines which systemControls take effect.

    • For tasks that use the host network mode, the network namespace systemControls aren't supported.

    If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.

    • For tasks that use the host IPC mode, IPC namespace systemControls aren't supported.

    • For tasks that use the task IPC mode, IPC namespace systemControls values apply to all containers within a task.

    This parameter is not supported for Windows containers.

    This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

    " @@ -6675,11 +6681,11 @@ "members":{ "encrypted":{ "shape":"BoxedBoolean", - "documentation":"

    Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " + "documentation":"

    Indicates whether the volume should be encrypted. If you turn on Region-level Amazon EBS encryption by default but set this value as false, the setting is overridden and the volume is encrypted with the KMS key specified for Amazon EBS encryption by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " }, "kmsKeyId":{ "shape":"EBSKMSKeyId", - "documentation":"

    The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference.

    Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

    " + "documentation":"

    The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When a key is specified using this parameter, it overrides Amazon EBS default encryption or any KMS key that you specified for cluster-level managed storage encryption. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information about encrypting Amazon EBS volumes attached to a task, see Encrypt data stored in Amazon EBS volumes attached to Amazon ECS tasks.

    Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

    " }, "volumeType":{ "shape":"EBSVolumeType", @@ -6693,6 +6699,10 @@ "shape":"EBSSnapshotId", "documentation":"

    The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " }, + "volumeInitializationRate":{ + "shape":"BoxedInteger", + "documentation":"

    The rate, in MiB/s, at which data is fetched from a snapshot of an existing Amazon EBS volume to create a new volume for attachment to the task. This property can be specified only if you specify a snapshotId. For more information, see Initialize Amazon EBS volumes in the Amazon EBS User Guide.

    " + }, "iops":{ "shape":"BoxedInteger", "documentation":"

    The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

    The following are the supported values for each volume type.

    • gp3: 3,000 - 16,000 IOPS

    • io1: 100 - 64,000 IOPS

    • io2: 100 - 256,000 IOPS

    This parameter is required for io1 and io2 volume types. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for st1, sc1, or standard volume types.

    This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference.

    " @@ -7228,7 +7238,7 @@ "members":{ "cluster":{ "shape":"String", - "documentation":"

    The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. If you do not specify a cluster, the default cluster is assumed.

    " + "documentation":"

    The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. If you do not specify a cluster, the default cluster is assumed.

    You can't change the cluster name.

    " }, "service":{ "shape":"String", @@ -7244,7 +7254,7 @@ }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", - "documentation":"

    The capacity provider strategy to update the service to use.

    If the service uses the default capacity provider strategy for the cluster, the service can be updated to use one or more capacity providers as opposed to the default capacity provider strategy. However, when a service is using a capacity provider strategy that's not the default capacity provider strategy, the service can't be updated to use the cluster's default capacity provider strategy.

    A capacity provider strategy consists of one or more capacity providers along with the base and weight to assign to them. A capacity provider must be associated with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders API is used to associate a capacity provider with a cluster. Only capacity providers with an ACTIVE or UPDATING status can be used.

    If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New capacity providers can be created with the CreateClusterCapacityProvider API operation.

    To use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.

    The PutClusterCapacityProviders API operation is used to update the list of available capacity providers for a cluster after the cluster is created.

    " + "documentation":"

    The details of a capacity provider strategy. You can set a capacity provider when you create a cluster, run a task, or update a service.

    When you use Fargate, the capacity providers are FARGATE or FARGATE_SPOT.

    When you use Amazon EC2, the capacity providers are Auto Scaling groups.

    You can change capacity providers for rolling deployments and blue/green deployments.

    The following list provides the valid transitions:

    • Update the Fargate launch type to an Auto Scaling group capacity provider.

    • Update the Amazon EC2 launch type to a Fargate capacity provider.

    • Update the Fargate capacity provider to an Auto Scaling group capacity provider.

    • Update the Amazon EC2 capacity provider to a Fargate capacity provider.

    • Update the Auto Scaling group or Fargate capacity provider back to the launch type.

      Pass an empty list in the capacityProviderStrategy parameter.

    For information about Amazon Web Services CDK considerations, see Amazon Web Services CDK considerations.

    " }, "deploymentConfiguration":{ "shape":"DeploymentConfiguration", @@ -7288,7 +7298,7 @@ }, "loadBalancers":{ "shape":"LoadBalancers", - "documentation":"

    A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and the container port to access from the load balancer. The container name is as it appears in a container definition.

    When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running.

    For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can update from a single target group to multiple target groups and from multiple target groups to a single target group.

    For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using CreateDeployment through CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    For services that use the external deployment controller, you can add, update, or remove load balancers by using CreateTaskSet. Note that multiple target groups are not supported for external deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    You can remove existing loadBalancers by passing an empty list.

    " + "documentation":"

    You must have a service-linked role when you update this property.

    A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the container name, and the container port to access from the load balancer. The container name is as it appears in a container definition.

    When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are running.

    For services that use rolling updates, you can add, update, or remove Elastic Load Balancing target groups. You can update from a single target group to multiple target groups and from multiple target groups to a single target group.

    For services that use blue/green deployments, you can update Elastic Load Balancing target groups by using CreateDeployment through CodeDeploy. Note that multiple target groups are not supported for blue/green deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    For services that use the external deployment controller, you can add, update, or remove load balancers by using CreateTaskSet. Note that multiple target groups are not supported for external deployments. For more information see Register multiple target groups with a service in the Amazon Elastic Container Service Developer Guide.

    You can remove existing loadBalancers by passing an empty list.

    " }, "propagateTags":{ "shape":"PropagateTags", @@ -7296,7 +7306,7 @@ }, "serviceRegistries":{ "shape":"ServiceRegistries", - "documentation":"

    The details for the service discovery registries to assign to this service. For more information, see Service Discovery.

    When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the updated service registries configuration, and then stops the old tasks when the new tasks are running.

    You can remove existing serviceRegistries by passing an empty list.

    " + "documentation":"

    You must have a service-linked role when you update this property.

    For more information about the role, see the CreateService request parameter role.

    The details for the service discovery registries to assign to this service. For more information, see Service Discovery.

    When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks with the updated service registries configuration, and then stops the old tasks when the new tasks are running.

    You can remove existing serviceRegistries by passing an empty list.

    " }, "serviceConnectConfiguration":{ "shape":"ServiceConnectConfiguration", @@ -7412,7 +7422,7 @@ }, "agentHash":{ "shape":"String", - "documentation":"

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    " + "documentation":"

    The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.

    " }, "dockerVersion":{ "shape":"String", diff --git a/services/efs/pom.xml b/services/efs/pom.xml index f408cda93a31..76ab5efef605 100644 --- a/services/efs/pom.xml +++ b/services/efs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT efs AWS Java SDK :: Services :: Amazon Elastic File System diff --git a/services/efs/src/main/resources/codegen-resources/customization.config b/services/efs/src/main/resources/codegen-resources/customization.config index 33df059ee4eb..ddfc00a7e632 100644 --- a/services/efs/src/main/resources/codegen-resources/customization.config +++ b/services/efs/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "describeMountTargets" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json index 8a7960d98327..3d3ccd0d215d 100644 --- a/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/efs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -57,152 +57,287 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + 
"fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws" ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": 
"booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], - "rules": [ + "endpoint": { + "url": "https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] }, - true - ] - } - ], - "rules": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + }, { "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": 
"booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", "argv": [ { "fn": "getAttr", @@ -210,105 +345,258 @@ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://efs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - 
"conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + 
"endpoint": { + "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://elasticfilesystem.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git a/services/efs/src/main/resources/codegen-resources/endpoint-tests.json b/services/efs/src/main/resources/codegen-resources/endpoint-tests.json index 3270bbfe6da0..b47d439e3bbf 100644 --- a/services/efs/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/efs/src/main/resources/codegen-resources/endpoint-tests.json @@ -1,471 +1,75 @@ { "testCases": [ { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.af-south-1.amazonaws.com" - } - }, - "params": { - "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-east-1 with FIPS enabled 
and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-east-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-northeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": false - 
} - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "Region": 
"ap-southeast-3", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ap-southeast-3.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ca-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.ca-central-1.amazonaws.com" - } - }, - "params": { - "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-central-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://elasticfilesystem-fips.eu-north-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-south-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-2.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For custom endpoint with fips enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-2.amazonaws.com" - } + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - 
"Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips disabled and dualstack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.eu-west-3.amazonaws.com" - } + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "eu-west-3", + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.eu-west-3.amazonaws.com" + "url": "https://efs-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-3", + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.me-south-1.amazonaws.com" - } - }, - "params": { - "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region me-south-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.me-south-1.amazonaws.com" + "url": "https://elasticfilesystem-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "me-south-1", + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://elasticfilesystem.sa-east-1.amazonaws.com" + "url": "https://efs.us-east-1.api.aws" } }, "params": { - "Region": "sa-east-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region sa-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.sa-east-1.amazonaws.com" - } - }, - "params": { - "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { @@ -482,423 +86,299 @@ } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-1.amazonaws.com" + "url": "https://efs-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", + "Region": "cn-northwest-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-east-2.amazonaws.com" - } - }, - "params": { - "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-2.amazonaws.com" + "url": "https://elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-east-2", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": 
"https://elasticfilesystem.us-west-1.amazonaws.com" + "url": "https://efs.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-west-1", + "Region": "cn-northwest-1", "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-west-2.amazonaws.com" + "url": "https://elasticfilesystem.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-west-2", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-west-2.amazonaws.com" + "url": "https://efs-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-west-2", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-east-1.api.aws" + "url": "https://elasticfilesystem-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack 
enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-east-1.api.aws" + "url": "https://efs.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.cn-north-1.amazonaws.com.cn" + "url": "https://elasticfilesystem.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "cn-north-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.cn-north-1.amazonaws.com.cn" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn" + "url": "https://elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "cn-northwest-1", + 
"Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.cn-north-1.api.amazonwebservices.com.cn" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "cn-north-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-gov-east-1.amazonaws.com" + "url": "https://elasticfilesystem.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-east-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-gov-west-1.amazonaws.com" - } - }, - 
"params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-west-1.amazonaws.com" + "url": "https://elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-gov-west-1", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem.us-gov-east-1.api.aws" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-gov-east-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-iso-east-1.c2s.ic.gov" + "url": "https://elasticfilesystem.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-isob-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - 
"url": "https://elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://elasticfilesystem-fips.eu-isoe-west-1.cloud.adc-e.uk" + } }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", "expect": { "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-iso-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elasticfilesystem.us-isob-east-1.sc2s.sgov.gov" + "url": "https://elasticfilesystem.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-isob-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov" - } + "error": "FIPS and DualStack are enabled, 
but this partition does not support one or both" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + "endpoint": { + "url": "https://elasticfilesystem-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", "expect": { "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-isob-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" + "url": "https://elasticfilesystem.us-isof-south-1.csp.hci.ic.gov" } }, "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid 
Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/services/efs/src/main/resources/codegen-resources/service-2.json b/services/efs/src/main/resources/codegen-resources/service-2.json index b59403799c80..554e5f11ff8a 100644 --- a/services/efs/src/main/resources/codegen-resources/service-2.json +++ b/services/efs/src/main/resources/codegen-resources/service-2.json @@ -31,7 +31,7 @@ {"shape":"AccessPointLimitExceeded"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of 1,000 access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

    " + "documentation":"

    Creates an EFS access point. An access point is an application-specific view into an EFS file system that applies an operating system user and group, and a file system path, to any file system request made through the access point. The operating system user and group override any identity information provided by the NFS client. The file system path is exposed as the access point's root directory. Applications using the access point can only access data in the application's own directory and any subdirectories. A file system can have a maximum of 10,000 access points unless you request an increase. To learn more, see Mounting a file system using EFS access points.

    If multiple requests to create access points on the same file system are sent in quick succession, and the file system is near the limit of access points, you may experience a throttling response for these requests. This is to ensure that the file system does not exceed the stated access point limit.

    This operation requires permissions for the elasticfilesystem:CreateAccessPoint action.

    Access points can be tagged on creation. If tags are specified in the creation action, IAM performs additional authorization on the elasticfilesystem:TagResource action to verify if users have permissions to create tags. Therefore, you must grant explicit permissions to use the elasticfilesystem:TagResource action. For more information, see Granting permissions to tag resources during creation.

    " }, "CreateFileSystem":{ "name":"CreateFileSystem", @@ -77,7 +77,7 @@ {"shape":"UnsupportedAvailabilityZone"}, {"shape":"AvailabilityZonesMismatch"} ], - "documentation":"

    Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

    You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

    You can create only one mount target for a One Zone file system. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabiltyZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

    For more information, see Amazon EFS: How it Works.

    To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

    In the request, provide the following:

    • The file system ID for which you are creating the mount target.

    • A subnet ID, which determines the following:

      • The VPC in which Amazon EFS creates the mount target

      • The Availability Zone in which Amazon EFS creates the mount target

      • The IP address range from which Amazon EFS selects the IP address of the mount target (if you don't specify an IP address in the request)

    After creating the mount target, Amazon EFS returns a response that includes, a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

    Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

    • Must belong to the same VPC as the subnets of the existing mount targets

    • Must not be in the same Availability Zone as any of the subnets of the existing mount targets

    If the request satisfies the requirements, Amazon EFS does the following:

    • Creates a new mount target in the specified subnet.

    • Also creates a new network interface in the subnet as follows:

      • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address).

      • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC.

      • Assigns the description Mount target fsmt-id for file system fs-id where fsmt-id is the mount target ID, and fs-id is the FileSystemId.

      • Sets the requesterManaged property of the network interface to true, and the requesterId value to EFS.

      Each Amazon EFS mount target has one corresponding requester-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails.

    The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

    We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

    This operation requires permissions for the following action on the file system:

    • elasticfilesystem:CreateMountTarget

    This operation also requires permissions for the following Amazon EC2 actions:

    • ec2:DescribeSubnets

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    " + "documentation":"

    Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.

    You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system.

    You can create only one mount target for a One Zone file system. You must create that mount target in the same Availability Zone in which the file system is located. Use the AvailabilityZoneName and AvailabilityZoneId properties in the DescribeFileSystems response object to get this information. Use the subnetId associated with the file system's Availability Zone when creating the mount target.

    For more information, see Amazon EFS: How it Works.

    To create a mount target for a file system, the file system's lifecycle state must be available. For more information, see DescribeFileSystems.

    In the request, provide the following:

    • The file system ID for which you are creating the mount target.

    • A subnet ID, which determines the following:

      • The VPC in which Amazon EFS creates the mount target

      • The Availability Zone in which Amazon EFS creates the mount target

      • The IP address range from which Amazon EFS selects the IP address of the mount target (if you don't specify an IP address in the request)

    After creating the mount target, Amazon EFS returns a response that includes a MountTargetId and an IpAddress. You use this IP address when mounting the file system in an EC2 instance. You can also use the mount target's DNS name when mounting the file system. The EC2 instance on which you mount the file system by using the mount target can resolve the mount target's DNS name to its IP address. For more information, see How it Works: Implementation Overview.

    Note that you can create mount targets for a file system in only one VPC, and there can be only one mount target per Availability Zone. That is, if the file system already has one or more mount targets created for it, the subnet specified in the request to add another mount target must meet the following requirements:

    • Must belong to the same VPC as the subnets of the existing mount targets

    • Must not be in the same Availability Zone as any of the subnets of the existing mount targets

    If the request satisfies the requirements, Amazon EFS does the following:

    • Creates a new mount target in the specified subnet.

    • Also creates a new network interface in the subnet as follows:

      • If the request provides an IpAddress, Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon EFS assigns a free address in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call does when a request does not specify a primary private IP address).

      • If the request provides SecurityGroups, this network interface is associated with those security groups. Otherwise, it belongs to the default security group for the subnet's VPC.

      • Assigns the description Mount target fsmt-id for file system fs-id where fsmt-id is the mount target ID, and fs-id is the FileSystemId.

      • Sets the requesterManaged property of the network interface to true, and the requesterId value to EFS.

      Each Amazon EFS mount target has one corresponding requester-managed EC2 network interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount target's description to the network interface ID, and the IpAddress field to its address. If network interface creation fails, the entire CreateMountTarget operation fails.

    The CreateMountTarget call returns only after creating the network interface, but while the mount target state is still creating, you can check the mount target creation status by calling the DescribeMountTargets operation, which among other things returns the mount target state.

    We recommend that you create a mount target in each of the Availability Zones. There are cost considerations for using a file system in an Availability Zone through a mount target created in another Availability Zone. For more information, see Amazon EFS pricing. In addition, by always using a mount target local to the instance's Availability Zone, you eliminate a partial failure scenario. If the Availability Zone in which your mount target is created goes down, then you can't access your file system through that mount target.

    This operation requires permissions for the following action on the file system:

    • elasticfilesystem:CreateMountTarget

    This operation also requires permissions for the following Amazon EC2 actions:

    • ec2:DescribeSubnets

    • ec2:DescribeNetworkInterfaces

    • ec2:CreateNetworkInterface

    " }, "CreateReplicationConfiguration":{ "name":"CreateReplicationConfiguration", @@ -312,7 +312,7 @@ {"shape":"BadRequest"}, {"shape":"FileSystemNotFound"} ], - "documentation":"

    Returns the current LifecycleConfiguration object for the specified Amazon EFS file system. Lifecycle management uses the LifecycleConfiguration object to identify when to move files between storage classes. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response.

    This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.

    " + "documentation":"

    Returns the current LifecycleConfiguration object for the specified EFS file system. Lifecycle management uses the LifecycleConfiguration object to identify when to move files between storage classes. For a file system without a LifecycleConfiguration object, the call returns an empty array in the response.

    This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration operation.

    " }, "DescribeMountTargetSecurityGroups":{ "name":"DescribeMountTargetSecurityGroups", @@ -789,7 +789,7 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

    The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " + "documentation":"

    The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " }, "AvailabilityZoneName":{ "shape":"AvailabilityZoneName", @@ -822,11 +822,19 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    Valid IPv4 address within the address range of the specified subnet.

    " + "documentation":"

    If the IP address type for the mount target is IPv4, then specify the IPv4 address within the address range of the specified subnet.

    " + }, + "Ipv6Address":{ + "shape":"Ipv6Address", + "documentation":"

    If the IP address type for the mount target is IPv6, then specify the IPv6 address within the address range of the specified subnet.

    " + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

    Specify the type of IP address of the mount target you are creating. Options are IPv4, dual stack, or IPv6. If you don't specify an IpAddressType, then IPv4 is used.

    • IPV4_ONLY – Create mount target with IPv4 only subnet or dual-stack subnet.

    • DUAL_STACK – Create mount target with dual-stack subnet.

    • IPV6_ONLY – Create mount target with IPv6 only subnet.

    Creating IPv6 mount target only ENI in dual-stack subnet is not supported.

    " }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

    Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified.

    " + "documentation":"

    VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as the subnet specified. The maximum number of security groups depends on account quota. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Security Groups table).

    " } }, "documentation":"

    " @@ -1369,7 +1377,7 @@ }, "FileSystemId":{ "shape":"FileSystemId", - "documentation":"

    The ID or ARN of the file system to use for the destination. For cross-account replication, this must be an ARN. The file system's replication overwrite replication must be disabled. If no ID or ARN is specified, then a new file system is created.

    " + "documentation":"

    The ID or ARN of the file system to use for the destination. For cross-account replication, this must be an ARN. The file system's replication overwrite protection must be disabled. If no ID or ARN is specified, then a new file system is created.

    When you initially configure replication to an existing file system, Amazon EFS writes data to or removes existing data from the destination file system to match data in the source file system. If you don't want to change data in the destination file system, then you should replicate to a new file system instead. For more information, see https://docs.aws.amazon.com/efs/latest/ug/create-replication.html.

    " }, "RoleArn":{ "shape":"RoleArn", @@ -1565,7 +1573,7 @@ "members":{ "ReplicationOverwriteProtection":{ "shape":"ReplicationOverwriteProtection", - "documentation":"

    The status of the file system's replication overwrite protection.

    • ENABLED – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ENABLED by default.

    • DISABLED – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.

    • REPLICATING – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is only modified only by EFS replication.

    If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, the file system becomes writeable.

    " + "documentation":"

    The status of the file system's replication overwrite protection.

    • ENABLED – The file system cannot be used as the destination file system in a replication configuration. The file system is writeable. Replication overwrite protection is ENABLED by default.

    • DISABLED – The file system can be used as the destination file system in a replication configuration. The file system is read-only and can only be modified by EFS replication.

    • REPLICATING – The file system is being used as the destination file system in a replication configuration. The file system is read-only and is modified only by EFS replication.

    If the replication configuration is deleted, the file system's replication overwrite protection is re-enabled, and the file system becomes writeable.

    " } }, "documentation":"

    Describes the protection on a file system.

    " @@ -1677,6 +1685,19 @@ "error":{"httpStatusCode":409}, "exception":true }, + "IpAddressType":{ + "type":"string", + "enum":[ + "IPV4_ONLY", + "IPV6_ONLY", + "DUAL_STACK" + ] + }, + "Ipv6Address":{ + "type":"string", + "max":39, + "min":3 + }, "KmsKeyId":{ "type":"string", "max":2048, @@ -1789,7 +1810,7 @@ }, "SecurityGroups":{ "shape":"SecurityGroups", - "documentation":"

    An array of up to five VPC security group IDs.

    " + "documentation":"

    An array of VPC security group IDs.

    " } }, "documentation":"

    " @@ -1842,6 +1863,10 @@ "shape":"IpAddress", "documentation":"

    Address at which the file system can be mounted by using the mount target.

    " }, + "Ipv6Address":{ + "shape":"Ipv6Address", + "documentation":"

    The IPv6 address for the mount target.

    " + }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", "documentation":"

    The ID of the network interface that Amazon EFS created when it created the mount target.

    " @@ -1891,7 +1916,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    The calling account has reached the limit for elastic network interfaces for the specific Amazon Web Services Region. Either delete some network interfaces or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Network interfaces per Region entry in the Network interfaces table).

    ", + "documentation":"

    The calling account has reached the limit for elastic network interfaces for the specific Amazon Web Services Region. Either delete some network interfaces or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Network interfaces per Region entry in the Network interfaces table).

    ", "error":{"httpStatusCode":409}, "exception":true }, @@ -1947,7 +1972,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    Returned if the default file system policy is in effect for the EFS file system specified.

    ", + "documentation":"

    Returned if no backup is specified for a One Zone EFS file system.

    ", "error":{"httpStatusCode":404}, "exception":true }, @@ -2226,7 +2251,7 @@ "ErrorCode":{"shape":"ErrorCode"}, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    Returned if the size of SecurityGroups specified in the request is greater than five.

    ", + "documentation":"

    Returned if the number of SecurityGroups specified in the request is greater than the limit, which is based on account quota. Either delete some security groups or request that the account quota be raised. For more information, see Amazon VPC Quotas in the Amazon VPC User Guide (see the Security Groups table).

    ", "error":{"httpStatusCode":400}, "exception":true }, @@ -2480,7 +2505,7 @@ }, "ProvisionedThroughputInMibps":{ "shape":"ProvisionedThroughputInMibps", - "documentation":"

    (Optional) The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " + "documentation":"

    (Optional) The throughput, measured in mebibytes per second (MiBps), that you want to provision for a file system that you're creating. Required if ThroughputMode is set to provisioned. Valid values are 1-3414 MiBps, with the upper limit depending on Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.

    " } } }, diff --git a/services/eks/pom.xml b/services/eks/pom.xml index 806092247638..e4f6eb0a91c1 100644 --- a/services/eks/pom.xml +++ b/services/eks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT eks AWS Java SDK :: Services :: EKS diff --git a/services/eks/src/main/resources/codegen-resources/customization.config b/services/eks/src/main/resources/codegen-resources/customization.config index 2e7c25143ce8..9ebe291adc93 100644 --- a/services/eks/src/main/resources/codegen-resources/customization.config +++ b/services/eks/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listClusters" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/eks/src/main/resources/codegen-resources/service-2.json b/services/eks/src/main/resources/codegen-resources/service-2.json index fba86b68b0d0..40b255882e95 100644 --- a/services/eks/src/main/resources/codegen-resources/service-2.json +++ b/services/eks/src/main/resources/codegen-resources/service-2.json @@ -122,7 +122,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"UnsupportedAvailabilityZoneException"} ], - "documentation":"

    Creates an Amazon EKS control plane.

    The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

    The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

    Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

    You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

    You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

    CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide.

    " + "documentation":"

    Creates an Amazon EKS control plane.

    The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances.

    The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows).

    Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster.

    You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. The endpoint domain name and IP address family depends on the value of the ipFamily for the cluster. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

    You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

    CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide.

    " }, "CreateEksAnywhereSubscription":{ "name":"CreateEksAnywhereSubscription", @@ -194,7 +194,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

    Creates an EKS Pod Identity association between a service account in an Amazon EKS cluster and an IAM role with EKS Pod Identity. Use EKS Pod Identity to give temporary IAM credentials to pods and the credentials are rotated automatically.

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    If a pod uses a service account that has an association, Amazon EKS sets environment variables in the containers of the pod. The environment variables configure the Amazon Web Services SDKs, including the Command Line Interface, to use the EKS Pod Identity credentials.

    Pod Identity is a simpler method than IAM roles for service accounts, as this method doesn't use OIDC identity providers. Additionally, you can configure a role for Pod Identity once, and reuse it across clusters.

    " + "documentation":"

    Creates an EKS Pod Identity association between a service account in an Amazon EKS cluster and an IAM role with EKS Pod Identity. Use EKS Pod Identity to give temporary IAM credentials to Pods and the credentials are rotated automatically.

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    If a Pod uses a service account that has an association, Amazon EKS sets environment variables in the containers of the Pod. The environment variables configure the Amazon Web Services SDKs, including the Command Line Interface, to use the EKS Pod Identity credentials.

    EKS Pod Identity is a simpler method than IAM roles for service accounts, as this method doesn't use OIDC identity providers. Additionally, you can configure a role for EKS Pod Identity once, and reuse it across clusters.

    Similar to Amazon Web Services IAM behavior, EKS Pod Identity associations are eventually consistent, and may take several seconds to be effective after the initial API call returns successfully. You must design your applications to account for these potential delays. We recommend that you don’t include association create/updates in the critical, high-availability code paths of your application. Instead, make changes in a separate initialization or setup routine that you run less frequently.

    You can set a target IAM role in the same or a different account for advanced scenarios. With a target role, EKS Pod Identity automatically performs two role assumptions in sequence: first assuming the role in the association that is in this account, then using those credentials to assume the target IAM role. This process provides your Pod with temporary credentials that have the permissions defined in the target role, allowing secure access to resources in another Amazon Web Services account.

    " }, "DeleteAccessEntry":{ "name":"DeleteAccessEntry", @@ -710,7 +710,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Returns a list of all insights checked for against the specified cluster. You can filter which insights are returned by category, associated Kubernetes version, and status.

    " + "documentation":"

    Returns a list of all insights checked for against the specified cluster. You can filter which insights are returned by category, associated Kubernetes version, and status. The default filter lists all categories and every status.

    The following lists the available categories:

    • UPGRADE_READINESS: Amazon EKS identifies issues that could impact your ability to upgrade to new versions of Kubernetes. These are called upgrade insights.

    • MISCONFIGURATION: Amazon EKS identifies misconfiguration in your EKS Hybrid Nodes setup that could impair functionality of your cluster or workloads. These are called configuration insights.

    " }, "ListNodegroups":{ "name":"ListNodegroups", @@ -874,7 +874,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

    Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate.

    You can use this operation to do the following actions:

    • You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    • You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    • You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide .

    • You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.

    • You can also use this API operation to add, change, or remove the configuration in the cluster for EKS Hybrid Nodes. To remove the configuration, use the remoteNetworkConfig key with an object containing both subkeys with empty arrays for each. Here is an inline example: \"remoteNetworkConfig\": { \"remoteNodeNetworks\": [], \"remotePodNetworks\": [] }.

    Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

    " + "documentation":"

    Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with DescribeUpdate.

    You can use this operation to do the following actions:

    • You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the Amazon EKS User Guide .

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing.

    • You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    • You can also use this API operation to choose different subnets and security groups for the cluster. You must specify at least two subnets that are in different Availability Zones. You can't change which VPC the subnets are from, the subnets must be in the same VPC as the subnets that the cluster was created with. For more information about the VPC requirements, see https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html in the Amazon EKS User Guide .

    • You can also use this API operation to enable or disable ARC zonal shift. If zonal shift is enabled, Amazon Web Services configures zonal autoshift for the cluster.

    • You can also use this API operation to add, change, or remove the configuration in the cluster for EKS Hybrid Nodes. To remove the configuration, use the remoteNetworkConfig key with an object containing both subkeys with empty arrays for each. Here is an inline example: \"remoteNetworkConfig\": { \"remoteNodeNetworks\": [], \"remotePodNetworks\": [] }.

    Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

    " }, "UpdateClusterVersion":{ "name":"UpdateClusterVersion", @@ -963,7 +963,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

    Updates a EKS Pod Identity association. Only the IAM role can be changed; an association can't be moved between clusters, namespaces, or service accounts. If you need to edit the namespace or service account, you need to delete the association and then create a new association with your desired settings.

    " + "documentation":"

    Updates an EKS Pod Identity association. In an update, you can change the IAM role, the target IAM role, or disableSessionTags. You must change at least one of these in an update. An association can't be moved between clusters, namespaces, or service accounts. If you need to edit the namespace or service account, you need to delete the association and then create a new association with your desired settings.

    Similar to Amazon Web Services IAM behavior, EKS Pod Identity associations are eventually consistent, and may take several seconds to be effective after the initial API call returns successfully. You must design your applications to account for these potential delays. We recommend that you don’t include association create/updates in the critical, high-availability code paths of your application. Instead, make changes in a separate initialization or setup routine that you run less frequently.

    You can set a target IAM role in the same or a different account for advanced scenarios. With a target role, EKS Pod Identity automatically performs two role assumptions in sequence: first assuming the role in the association that is in this account, then using those credentials to assume the target IAM role. This process provides your Pod with temporary credentials that have the permissions defined in the target role, allowing secure access to resources in another Amazon Web Services account.

    " } }, "shapes":{ @@ -1164,7 +1164,7 @@ }, "podIdentityAssociations":{ "shape":"StringList", - "documentation":"

    An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations owned by the add-on. Each association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } }, "documentation":"

    An Amazon EKS add-on. For more information, see Amazon EKS add-ons in the Amazon EKS User Guide.

    " @@ -1280,7 +1280,7 @@ "documentation":"

    The ARN of an IAM Role.

    " } }, - "documentation":"

    A type of Pod Identity Association owned by an Amazon EKS Add-on.

    Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    A type of EKS Pod Identity association owned by an Amazon EKS add-on.

    Each association maps a role to a service account in a namespace in the cluster.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " }, "AddonPodIdentityAssociationsList":{ "type":"list", @@ -1291,14 +1291,14 @@ "members":{ "serviceAccount":{ "shape":"String", - "documentation":"

    The Kubernetes Service Account name used by the addon.

    " + "documentation":"

    The Kubernetes Service Account name used by the add-on.

    " }, "recommendedManagedPolicies":{ "shape":"StringList", - "documentation":"

    A suggested IAM Policy for the addon.

    " + "documentation":"

    A suggested IAM Policy for the add-on.

    " } }, - "documentation":"

    Information about how to configure IAM for an Addon.

    " + "documentation":"

    Information about how to configure IAM for an add-on.

    " }, "AddonPodIdentityConfigurationList":{ "type":"list", @@ -1330,7 +1330,7 @@ }, "computeTypes":{ "shape":"StringList", - "documentation":"

    Indicates the compute type of the addon version.

    " + "documentation":"

    Indicates the compute type of the add-on version.

    " }, "compatibilities":{ "shape":"Compatibilities", @@ -1342,7 +1342,7 @@ }, "requiresIamPermissions":{ "shape":"Boolean", - "documentation":"

    Indicates if the Addon requires IAM Permissions to operate, such as networking permissions.

    " + "documentation":"

    Indicates if the add-on requires IAM Permissions to operate, such as networking permissions.

    " } }, "documentation":"

    Information about an add-on version.

    " @@ -1565,7 +1565,10 @@ }, "Category":{ "type":"string", - "enum":["UPGRADE_READINESS"] + "enum":[ + "UPGRADE_READINESS", + "MISCONFIGURATION" + ] }, "CategoryList":{ "type":"list", @@ -2112,7 +2115,7 @@ }, "podIdentityAssociations":{ "shape":"AddonPodIdentityAssociationsList", - "documentation":"

    An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations to be created. Each association maps a Kubernetes service account to an IAM role.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } } }, @@ -2177,7 +2180,7 @@ }, "bootstrapSelfManagedAddons":{ "shape":"BoxedBoolean", - "documentation":"

    If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

    The default networking addons include vpc-cni, coredns, and kube-proxy.

    Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

    " + "documentation":"

    If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

    The default networking add-ons include vpc-cni, coredns, and kube-proxy.

    Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

    " }, "upgradePolicy":{ "shape":"UpgradePolicyRequest", @@ -2417,13 +2420,13 @@ "members":{ "clusterName":{ "shape":"String", - "documentation":"

    The name of the cluster to create the association in.

    ", + "documentation":"

    The name of the cluster to create the EKS Pod Identity association in.

    ", "location":"uri", "locationName":"name" }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the EKS Pod Identity association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -2431,7 +2434,7 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

    " }, "clientRequestToken":{ "shape":"String", @@ -2441,6 +2444,14 @@ "tags":{ "shape":"TagMap", "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources.

    The following basic restrictions apply to tags:

    • Maximum number of tags per resource – 50

    • For each resource, each tag key must be unique, and each tag key can have only one value.

    • Maximum key length – 128 Unicode characters in UTF-8

    • Maximum value length – 256 Unicode characters in UTF-8

    • If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.

    • Tag keys and values are case-sensitive.

    • Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.

    " + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    Disable the automatic session tags that are appended by EKS Pod Identity.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    Amazon Web Services compresses inline session policies, managed policy ARNs, and session tags into a packed binary format that has a separate limit. If you receive a PackedPolicyTooLarge error indicating the packed binary format has exceeded the size limit, you can attempt to reduce the size by disabling the session tags added by EKS Pod Identity.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.

    When you run applications on Amazon EKS, your application might need to access Amazon Web Services resources from a different role that exists in the same or different Amazon Web Services account. For example, your application running in “Account A” might need to access resources, such as Amazon S3 buckets in “Account B” or within “Account A” itself. You can create an association to access Amazon Web Services resources in “Account B” by creating two IAM roles: a role in “Account A” and a role in “Account B” (which can be the same or different account), each with the necessary trust and permission policies. After you provide these roles in the IAM role and Target IAM role fields, EKS will perform role chaining to ensure your application gets the required permissions. This means Role A will assume Role B, allowing your Pods to securely access resources like S3 buckets in the target account.

    " } } }, @@ -2476,8 +2487,7 @@ }, "DeleteAccessEntryResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAddonRequest":{ "type":"structure", @@ -2760,7 +2770,7 @@ }, "podIdentityConfiguration":{ "shape":"AddonPodIdentityConfigurationList", - "documentation":"

    The Kubernetes service account name used by the addon, and any suggested IAM policies. Use this information to create an IAM Role for the Addon.

    " + "documentation":"

    The Kubernetes service account name used by the add-on, and any suggested IAM policies. Use this information to create an IAM Role for the add-on.

    " } } }, @@ -3195,8 +3205,7 @@ }, "DisassociateAccessPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateIdentityProviderConfigRequest":{ "type":"structure", @@ -3742,7 +3751,7 @@ "members":{ "categories":{ "shape":"CategoryList", - "documentation":"

    The categories to use to filter insights.

    " + "documentation":"

    The categories to use to filter insights. The following lists the available categories:

    • UPGRADE_READINESS: Amazon EKS identifies issues that could impact your ability to upgrade to new versions of Kubernetes. These are called upgrade insights.

    • MISCONFIGURATION: Amazon EKS identifies misconfiguration in your EKS Hybrid Nodes setup that could impair functionality of your cluster or workloads. These are called configuration insights.

    " }, "kubernetesVersions":{ "shape":"StringList", @@ -4974,7 +4983,7 @@ }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -4982,7 +4991,7 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account.

    " + "documentation":"

    The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the Pods that use this service account.

    " }, "associationArn":{ "shape":"String", @@ -5002,11 +5011,23 @@ }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

    The most recent timestamp that the association was modified at

    " + "documentation":"

    The most recent timestamp that the association was modified at.

    " }, "ownerArn":{ "shape":"String", - "documentation":"

    If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

    " + "documentation":"

    If defined, the EKS Pod Identity association is owned by an Amazon EKS add-on.

    " + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    The state of the automatic session tags. The value of true disables these tags.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.

    " + }, + "externalId":{ + "shape":"String", + "documentation":"

    The unique identifier for this EKS Pod Identity association for a target IAM role. You put this value in the trust policy of the target role, in a Condition to match the sts.ExternalId. This ensures that the target role can only be assumed by this association. This prevents the confused deputy problem. For more information about the confused deputy problem, see The confused deputy problem in the IAM User Guide.

    If you want to use the same target role with multiple associations or other roles, use independent statements in the trust policy to allow sts:AssumeRole access from each role.

    " } }, "documentation":"

    Amazon EKS Pod Identity associations provide the ability to manage credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.

    " @@ -5024,7 +5045,7 @@ }, "namespace":{ "shape":"String", - "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace.

    " + "documentation":"

    The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the Pods that use the service account must be in this namespace.

    " }, "serviceAccount":{ "shape":"String", @@ -5040,7 +5061,7 @@ }, "ownerArn":{ "shape":"String", - "documentation":"

    If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

    " + "documentation":"

    If defined, the association is owned by an Amazon EKS add-on.

    " } }, "documentation":"

    The summarized description of the association.

    Each summary is simplified by removing these fields compared to the full PodIdentityAssociation :

    • The IAM role: roleArn

    • The timestamp that the association was created at: createdAt

    • The most recent timestamp that the association was modified at: modifiedAt

    • The tags on the association: tags

    " @@ -5106,11 +5127,11 @@ "members":{ "remoteNodeNetworks":{ "shape":"RemoteNodeNetworkList", - "documentation":"

    The list of network CIDRs that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    The list of network CIDRs that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " }, "remotePodNetworks":{ "shape":"RemotePodNetworkList", - "documentation":"

    The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " } }, "documentation":"

    The configuration in the cluster for EKS Hybrid Nodes. You can add, change, or remove this configuration after the cluster is created.

    " @@ -5134,10 +5155,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " } }, - "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " + "documentation":"

    A network CIDR that can contain hybrid nodes.

    These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

    • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

    • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

    • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

    " }, "RemoteNodeNetworkList":{ "type":"list", @@ -5149,10 +5170,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " } }, - "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " + "documentation":"

    A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

    These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

    Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

    It must satisfy the following requirements:

    • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /32, maximum allowed size is /8. Publicly-routable addresses aren't supported.

    • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

    " }, "RemotePodNetworkList":{ "type":"list", @@ -5379,8 +5400,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -5474,8 +5494,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Update":{ "type":"structure", @@ -5602,7 +5621,7 @@ }, "podIdentityAssociations":{ "shape":"AddonPodIdentityAssociationsList", - "documentation":"

    An array of Pod Identity Assocations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change. If an empty array is provided, existing Pod Identity Assocations owned by the Addon are deleted.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the Amazon EKS User Guide.

    " + "documentation":"

    An array of EKS Pod Identity associations to be updated. Each association maps a Kubernetes service account to an IAM role. If this value is left blank, no change. If an empty array is provided, existing associations owned by the add-on are deleted.

    For more information, see Attach an IAM Role to an Amazon EKS add-on using EKS Pod Identity in the Amazon EKS User Guide.

    " } } }, @@ -5926,12 +5945,20 @@ }, "roleArn":{ "shape":"String", - "documentation":"

    The new IAM role to change the

    " + "documentation":"

    The new IAM role to change in the association.

    " }, "clientRequestToken":{ "shape":"String", "documentation":"

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

    ", "idempotencyToken":true + }, + "disableSessionTags":{ + "shape":"BoxedBoolean", + "documentation":"

    Disable the automatic session tags that are appended by EKS Pod Identity.

    EKS Pod Identity adds a pre-defined set of session tags when it assumes the role. You can use these tags to author a single role that can work across resources by allowing access to Amazon Web Services resources based on matching tags. By default, EKS Pod Identity attaches six tags, including tags for cluster name, namespace, and service account name. For the list of tags added by EKS Pod Identity, see List of session tags added by EKS Pod Identity in the Amazon EKS User Guide.

    Amazon Web Services compresses inline session policies, managed policy ARNs, and session tags into a packed binary format that has a separate limit. If you receive a PackedPolicyTooLarge error indicating the packed binary format has exceeded the size limit, you can attempt to reduce the size by disabling the session tags added by EKS Pod Identity.

    " + }, + "targetRoleArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the target IAM role to associate with the service account. This role is assumed by using the EKS Pod Identity association role, then the credentials for this role are injected into the Pod.

    When you run applications on Amazon EKS, your application might need to access Amazon Web Services resources from a different role that exists in the same or different Amazon Web Services account. For example, your application running in “Account A” might need to access resources, such as buckets in “Account B” or within “Account A” itself. You can create an association to access Amazon Web Services resources in “Account B” by creating two IAM roles: a role in “Account A” and a role in “Account B” (which can be the same or different account), each with the necessary trust and permission policies. After you provide these roles in the IAM role and Target IAM role fields, EKS will perform role chaining to ensure your application gets the required permissions. This means Role A will assume Role B, allowing your Pods to securely access resources like S3 buckets in the target account.

    " } } }, @@ -5940,7 +5967,7 @@ "members":{ "association":{ "shape":"PodIdentityAssociation", - "documentation":"

    The full description of the EKS Pod Identity association that was updated.

    " + "documentation":"

    The full description of the association that was updated.

    " } } }, @@ -6027,15 +6054,15 @@ }, "endpointPublicAccess":{ "shape":"BoxedBoolean", - "documentation":"

    Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    Set this value to false to disable public access to your cluster's Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes API server can only receive requests from within the cluster VPC. The default value for this parameter is true, which enables public access for your Kubernetes API server. The endpoint domain name and IP address family depends on the value of the ipFamily for the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    " }, "endpointPrivateAccess":{ "shape":"BoxedBoolean", - "documentation":"

    Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. If you disable private access and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    " }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0 and additionally ::/0 for dual-stack `IPv6` clusters. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    Note that the public endpoints are dual-stack for only IPv6 clusters that are made after October 2024. You can't add IPv6 CIDR blocks to IPv4 clusters or IPv6 clusters that were made before October 2024.

    " } }, "documentation":"

    An object representing the VPC configuration to use for an Amazon EKS cluster.

    " @@ -6065,11 +6092,11 @@ }, "endpointPrivateAccess":{ "shape":"Boolean", - "documentation":"

    This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the Amazon EKS User Guide .

    " + "documentation":"

    This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet. If this value is disabled and you have nodes or Fargate pods in the cluster, then ensure that publicAccessCidrs includes the necessary CIDR blocks for communication with the nodes or Fargate pods. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    " }, "publicAccessCidrs":{ "shape":"StringList", - "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint.

    " + "documentation":"

    The CIDR blocks that are allowed access to your cluster's public Kubernetes API server endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that you specify is denied. The default value is 0.0.0.0/0 and additionally ::/0 for dual-stack `IPv6` clusters. If you've disabled private endpoint access, make sure that you specify the necessary CIDR blocks for every node and Fargate Pod in the cluster. For more information, see Cluster API server endpoint in the Amazon EKS User Guide .

    Note that the public endpoints are dual-stack for only IPv6 clusters that are made after October 2024. You can't add IPv6 CIDR blocks to IPv4 clusters or IPv6 clusters that were made before October 2024.

    " } }, "documentation":"

    An object representing an Amazon EKS cluster VPC configuration response.

    " diff --git a/services/eksauth/pom.xml b/services/eksauth/pom.xml index 10b568e69ba6..d28afb6062ba 100644 --- a/services/eksauth/pom.xml +++ b/services/eksauth/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT eksauth AWS Java SDK :: Services :: EKS Auth diff --git a/services/eksauth/src/main/resources/codegen-resources/customization.config b/services/eksauth/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/eksauth/src/main/resources/codegen-resources/customization.config +++ b/services/eksauth/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/elasticache/pom.xml b/services/elasticache/pom.xml index 131812061f52..e4d9047454aa 100644 --- a/services/elasticache/pom.xml +++ b/services/elasticache/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elasticache AWS Java SDK :: Services :: Amazon ElastiCache diff --git a/services/elasticbeanstalk/pom.xml b/services/elasticbeanstalk/pom.xml index 09fff385d82d..7a015854976e 100644 --- a/services/elasticbeanstalk/pom.xml +++ b/services/elasticbeanstalk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elasticbeanstalk AWS Java SDK :: Services :: AWS Elastic Beanstalk diff --git a/services/elasticloadbalancing/pom.xml b/services/elasticloadbalancing/pom.xml index 98199fbb5119..e426e4da228b 100644 --- a/services/elasticloadbalancing/pom.xml +++ b/services/elasticloadbalancing/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elasticloadbalancing AWS Java SDK :: Services :: Elastic Load Balancing diff --git a/services/elasticloadbalancingv2/pom.xml b/services/elasticloadbalancingv2/pom.xml index 
611aa8b45853..c04b40fc9c47 100644 --- a/services/elasticloadbalancingv2/pom.xml +++ b/services/elasticloadbalancingv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elasticloadbalancingv2 AWS Java SDK :: Services :: Elastic Load Balancing V2 diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/paginators-1.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/paginators-1.json index e6876acb2060..49651f6d008d 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,10 @@ { "pagination": { + "DescribeAccountLimits": { + "input_token": "Marker", + "output_token": "NextMarker", + "result_key": "Limits" + }, "DescribeListenerCertificates": { "input_token": "Marker", "output_token": "NextMarker", @@ -28,17 +33,20 @@ "DescribeTrustStoreAssociations": { "input_token": "Marker", "limit_key": "PageSize", - "output_token": "NextMarker" + "output_token": "NextMarker", + "result_key": "TrustStoreAssociations" }, "DescribeTrustStoreRevocations": { "input_token": "Marker", "limit_key": "PageSize", - "output_token": "NextMarker" + "output_token": "NextMarker", + "result_key": "TrustStoreRevocations" }, "DescribeTrustStores": { "input_token": "Marker", "limit_key": "PageSize", - "output_token": "NextMarker" + "output_token": "NextMarker", + "result_key": "TrustStores" } } } \ No newline at end of file diff --git a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json index 86b332d919e5..b5be9f6062c7 100644 --- a/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json +++ b/services/elasticloadbalancingv2/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ 
{"shape":"TooManyCertificatesException"}, {"shape":"CertificateNotFoundException"} ], - "documentation":"

    Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

    If the certificate in already in the certificate list, the call is successful but the certificate is not added again.

    For more information, see HTTPS listeners in the Application Load Balancers Guide or TLS listeners in the Network Load Balancers Guide.

    " + "documentation":"

    Adds the specified SSL server certificate to the certificate list for the specified HTTPS or TLS listener.

    If the certificate in already in the certificate list, the call is successful but the certificate is not added again.

    For more information, see SSL certificates in the Application Load Balancers Guide or Server certificates in the Network Load Balancers Guide.

    " }, "AddTags":{ "name":"AddTags", @@ -204,7 +204,7 @@ {"shape":"TooManyTagsException"}, {"shape":"DuplicateTagKeysException"} ], - "documentation":"

    Creates a trust store.

    " + "documentation":"

    Creates a trust store.

    For more information, see Mutual TLS for Application Load Balancers.

    " }, "DeleteListener":{ "name":"DeleteListener", @@ -324,7 +324,7 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"InvalidTargetException"} ], - "documentation":"

    Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

    The load balancer stops sending requests to targets that are deregistering, but uses connection draining to ensure that in-flight traffic completes on the existing connections. This deregistration delay is configured by default but can be updated for each target group.

    For more information, see the following:

    Note: If the specified target does not exist, the action returns successfully.

    " + "documentation":"

    Deregisters the specified targets from the specified target group. After the targets are deregistered, they no longer receive traffic from the load balancer.

    The load balancer stops sending requests to targets that are deregistering, but uses connection draining to ensure that in-flight traffic completes on the existing connections. This deregistration delay is configured by default but can be updated for each target group.

    For more information, see the following:

    Note: If the specified target does not exist, the action returns successfully.

    " }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", @@ -385,7 +385,7 @@ "errors":[ {"shape":"ListenerNotFoundException"} ], - "documentation":"

    Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

    If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

    For more information, see SSL certificates in the Application Load Balancers Guide or Server certificates in the Network Load Balancers Guide.

    " + "documentation":"

    Describes the default certificate and the certificate list for the specified HTTPS or TLS listener.

    If the default certificate is also in the certificate list, it appears twice in the results (once with IsDefault set to true and once with IsDefault set to false).

    For more information, see SSL certificates in the Application Load Balancers Guide or Server certificates in the Network Load Balancers Guide.

    " }, "DescribeListeners":{ "name":"DescribeListeners", @@ -469,7 +469,7 @@ "errors":[ {"shape":"SSLPolicyNotFoundException"} ], - "documentation":"

    Describes the specified policies or all policies used for SSL negotiation.

    For more information, see Security policies in the Application Load Balancers Guide or Security policies in the Network Load Balancers Guide.

    " + "documentation":"

    Describes the specified policies or all policies used for SSL negotiation.

    For more information, see Security policies in the Application Load Balancers Guide and Security policies in the Network Load Balancers Guide.

    " }, "DescribeTags":{ "name":"DescribeTags", @@ -842,7 +842,7 @@ {"shape":"InvalidTargetException"}, {"shape":"TooManyRegistrationsForTargetIdException"} ], - "documentation":"

    Registers the specified targets with the specified target group.

    If the target is an EC2 instance, it must be in the running state when you register it.

    By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

    With a Network Load Balancer, you can't register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

    " + "documentation":"

    Registers the specified targets with the specified target group.

    If the target is an EC2 instance, it must be in the running state when you register it.

    By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

    For more information, see the following:

    " }, "RemoveListenerCertificates":{ "name":"RemoveListenerCertificates", @@ -979,8 +979,7 @@ "shapes":{ "ALPNPolicyNotSupportedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified ALPN policy is not supported.

    ", "error":{ "code":"ALPNPolicyNotFound", @@ -1092,8 +1091,7 @@ }, "AddTagsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "AddTrustStoreRevocationsInput":{ "type":"structure", @@ -1146,8 +1144,7 @@ "AllocationId":{"type":"string"}, "AllocationIdNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified allocation ID does not exist.

    ", "error":{ "code":"AllocationIdNotFound", @@ -1360,8 +1357,7 @@ }, "AvailabilityZoneNotSupportedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified Availability Zone is not supported.

    ", "error":{ "code":"AvailabilityZoneNotSupported", @@ -1376,8 +1372,7 @@ }, "CaCertificatesBundleNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified CA certificate bundle does not exist.

    ", "error":{ "code":"CaCertificatesBundleNotFound", @@ -1389,8 +1384,7 @@ "CanonicalHostedZoneId":{"type":"string"}, "CapacityDecreaseRequestsLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've exceeded the daily capacity decrease limit for this reservation.

    ", "error":{ "code":"CapacityDecreaseRequestLimitExceeded", @@ -1401,8 +1395,7 @@ }, "CapacityReservationPendingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is a pending capacity reservation.

    ", "error":{ "code":"CapacityReservationPending", @@ -1438,8 +1431,7 @@ "CapacityUnitsDouble":{"type":"double"}, "CapacityUnitsLimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've exceeded the capacity units limit.

    ", "error":{ "code":"CapacityUnitsLimitExceeded", @@ -1469,8 +1461,7 @@ }, "CertificateNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified certificate does not exist.

    ", "error":{ "code":"CertificateNotFound", @@ -1524,7 +1515,7 @@ }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

    [HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

    For more information, see Security policies in the Application Load Balancers Guide and Security policies in the Network Load Balancers Guide.

    " + "documentation":"

    [HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

    For more information, see Security policies in the Application Load Balancers Guide and Security policies in the Network Load Balancers Guide.

    " }, "Certificates":{ "shape":"CertificateList", @@ -1536,7 +1527,7 @@ }, "AlpnPolicy":{ "shape":"AlpnPolicyName", - "documentation":"

    [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

    • HTTP1Only

    • HTTP2Only

    • HTTP2Optional

    • HTTP2Preferred

    • None

    For more information, see ALPN policies in the Network Load Balancers Guide.

    " + "documentation":"

    [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

    • HTTP1Only

    • HTTP2Only

    • HTTP2Optional

    • HTTP2Preferred

    • None

    For more information, see ALPN policies in the Network Load Balancers Guide.

    " }, "Tags":{ "shape":"TagList", @@ -1789,8 +1780,7 @@ "Default":{"type":"boolean"}, "DeleteAssociationSameAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified association can't be within the same account.

    ", "error":{ "code":"DeleteAssociationSameAccount", @@ -1811,8 +1801,7 @@ }, "DeleteListenerOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLoadBalancerInput":{ "type":"structure", @@ -1826,8 +1815,7 @@ }, "DeleteLoadBalancerOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRuleInput":{ "type":"structure", @@ -1841,8 +1829,7 @@ }, "DeleteRuleOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSharedTrustStoreAssociationInput":{ "type":"structure", @@ -1863,8 +1850,7 @@ }, "DeleteSharedTrustStoreAssociationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTargetGroupInput":{ "type":"structure", @@ -1878,8 +1864,7 @@ }, "DeleteTargetGroupOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTrustStoreInput":{ "type":"structure", @@ -1893,8 +1878,7 @@ }, "DeleteTrustStoreOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterTargetsInput":{ "type":"structure", @@ -1915,8 +1899,7 @@ }, "DeregisterTargetsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAccountLimitsInput":{ "type":"structure", @@ -2419,8 +2402,7 @@ "Description":{"type":"string"}, "DuplicateListenerException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A listener with the specified port already exists.

    ", "error":{ "code":"DuplicateListener", @@ -2431,8 +2413,7 @@ }, "DuplicateLoadBalancerNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A load balancer with the specified name already exists.

    ", "error":{ "code":"DuplicateLoadBalancerName", @@ -2443,8 +2424,7 @@ }, "DuplicateTagKeysException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A tag key was specified more than once.

    ", "error":{ "code":"DuplicateTagKeys", @@ -2455,8 +2435,7 @@ }, "DuplicateTargetGroupNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A target group with the specified name already exists.

    ", "error":{ "code":"DuplicateTargetGroupName", @@ -2467,8 +2446,7 @@ }, "DuplicateTrustStoreNameException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A trust store with the specified name already exists.

    ", "error":{ "code":"DuplicateTrustStoreName", @@ -2623,8 +2601,7 @@ }, "HealthUnavailableException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The health of the specified targets could not be retrieved due to an internal error.

    ", "error":{ "code":"HealthUnavailable", @@ -2637,7 +2614,7 @@ "members":{ "Values":{ "shape":"ListOfString", - "documentation":"

    The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

    If you specify multiple strings, the condition is satisfied if one of the strings matches the host name.

    " + "documentation":"

    The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). You must include at least one \".\" character. You can include only alphabetical characters after the final \".\" character.

    If you specify multiple strings, the condition is satisfied if one of the strings matches the host name.

    " } }, "documentation":"

    Information about a host header condition.

    " @@ -2648,7 +2625,7 @@ "members":{ "HttpHeaderName":{ "shape":"HttpHeaderConditionName", - "documentation":"

    The name of the HTTP header field. The maximum size is 40 characters. The header name is case insensitive. The allowed characters are specified by RFC 7230. Wildcards are not supported.

    You can't use an HTTP header condition to specify the host header. Use HostHeaderConditionConfig to specify a host header condition.

    " + "documentation":"

    The name of the HTTP header field. The maximum size is 40 characters. The header name is case insensitive. The allowed characters are specified by RFC 7230. Wildcards are not supported.

    You can't use an HTTP header condition to specify the host header. Instead, use a host condition.

    " }, "Values":{ "shape":"ListOfString", @@ -2672,8 +2649,7 @@ "IgnoreClientCertificateExpiry":{"type":"boolean"}, "IncompatibleProtocolsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified configuration is not valid with this protocol.

    ", "error":{ "code":"IncompatibleProtocols", @@ -2684,8 +2660,7 @@ }, "InsufficientCapacityException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is insufficient capacity to reserve.

    ", "error":{ "code":"InsufficientCapacity", @@ -2695,8 +2670,7 @@ }, "InvalidCaCertificatesBundleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified CA certificate bundle is in an invalid format, or corrupt.

    ", "error":{ "code":"InvalidCaCertificatesBundle", @@ -2707,8 +2681,7 @@ }, "InvalidConfigurationRequestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The requested configuration is not valid.

    ", "error":{ "code":"InvalidConfigurationRequest", @@ -2719,8 +2692,7 @@ }, "InvalidLoadBalancerActionException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The requested action is not valid.

    ", "error":{ "code":"InvalidLoadBalancerAction", @@ -2731,8 +2703,7 @@ }, "InvalidRevocationContentException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The provided revocation file is in an invalid format, or uses an incorrect algorithm.

    ", "error":{ "code":"InvalidRevocationContent", @@ -2743,8 +2714,7 @@ }, "InvalidSchemeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The requested scheme is not valid.

    ", "error":{ "code":"InvalidScheme", @@ -2755,8 +2725,7 @@ }, "InvalidSecurityGroupException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified security group does not exist.

    ", "error":{ "code":"InvalidSecurityGroup", @@ -2767,8 +2736,7 @@ }, "InvalidSubnetException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified subnet is out of available addresses.

    ", "error":{ "code":"InvalidSubnet", @@ -2779,8 +2747,7 @@ }, "InvalidTargetException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified target does not exist, is not in the same VPC as the target group, or has an unsupported instance type.

    ", "error":{ "code":"InvalidTarget", @@ -2914,8 +2881,7 @@ }, "ListenerNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified listener does not exist.

    ", "error":{ "code":"ListenerNotFound", @@ -3064,8 +3030,7 @@ }, "LoadBalancerNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified load balancer does not exist.

    ", "error":{ "code":"LoadBalancerNotFound", @@ -3261,7 +3226,7 @@ }, "SslPolicy":{ "shape":"SslPolicyName", - "documentation":"

    [HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

    For more information, see Security policies in the Application Load Balancers Guide or Security policies in the Network Load Balancers Guide.

    " + "documentation":"

    [HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.

    For more information, see Security policies in the Application Load Balancers Guide or Security policies in the Network Load Balancers Guide.

    " }, "Certificates":{ "shape":"CertificateList", @@ -3273,7 +3238,7 @@ }, "AlpnPolicy":{ "shape":"AlpnPolicyName", - "documentation":"

    [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

    • HTTP1Only

    • HTTP2Only

    • HTTP2Optional

    • HTTP2Preferred

    • None

    For more information, see ALPN policies in the Network Load Balancers Guide.

    " + "documentation":"

    [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) policy. You can specify one policy name. The following are the possible values:

    • HTTP1Only

    • HTTP2Only

    • HTTP2Optional

    • HTTP2Preferred

    • None

    For more information, see ALPN policies in the Network Load Balancers Guide.

    " }, "MutualAuthentication":{ "shape":"MutualAuthenticationAttributes", @@ -3490,8 +3455,7 @@ "NumberOfRevokedEntries":{"type":"long"}, "OperationNotPermittedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This operation is not allowed.

    ", "error":{ "code":"OperationNotPermitted", @@ -3516,7 +3480,7 @@ "members":{ "Values":{ "shape":"ListOfString", - "documentation":"

    The path patterns to compare against the request URL. The maximum size of each string is 128 characters. The comparison is case sensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

    If you specify multiple strings, the condition is satisfied if one of them matches the request URL. The path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use QueryStringConditionConfig.

    " + "documentation":"

    The path patterns to compare against the request URL. The maximum size of each string is 128 characters. The comparison is case sensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

    If you specify multiple strings, the condition is satisfied if one of them matches the request URL. The path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a query string condition.

    " } }, "documentation":"

    Information about a path pattern condition.

    " @@ -3532,8 +3496,7 @@ }, "PriorRequestNotCompleteException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This operation is not allowed while a prior request has not been completed.

    ", "error":{ "code":"PriorRequestNotComplete", @@ -3544,8 +3507,7 @@ }, "PriorityInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified priority is in use.

    ", "error":{ "code":"PriorityInUse", @@ -3673,8 +3635,7 @@ }, "RegisterTargetsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "RemoveIpamPoolEnum":{ "type":"string", @@ -3703,8 +3664,7 @@ }, "RemoveListenerCertificatesOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "RemoveTagsInput":{ "type":"structure", @@ -3725,8 +3685,7 @@ }, "RemoveTagsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "RemoveTrustStoreRevocationsInput":{ "type":"structure", @@ -3747,8 +3706,7 @@ }, "RemoveTrustStoreRevocationsOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "ResetCapacityReservation":{"type":"boolean"}, "ResourceArn":{"type":"string"}, @@ -3758,8 +3716,7 @@ }, "ResourceInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    A specified resource is in use.

    ", "error":{ "code":"ResourceInUse", @@ -3770,8 +3727,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified resource does not exist.

    ", "error":{ "code":"ResourceNotFound", @@ -3804,8 +3760,7 @@ }, "RevocationContentNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified revocation file does not exist.

    ", "error":{ "code":"RevocationContentNotFound", @@ -3821,8 +3776,7 @@ "RevocationId":{"type":"long"}, "RevocationIdNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified revocation ID does not exist.

    ", "error":{ "code":"RevocationIdNotFound", @@ -3914,8 +3868,7 @@ }, "RuleNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified rule does not exist.

    ", "error":{ "code":"RuleNotFound", @@ -3956,8 +3909,7 @@ "S3ObjectVersion":{"type":"string"}, "SSLPolicyNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified SSL policy does not exist.

    ", "error":{ "code":"SSLPolicyNotFound", @@ -4098,7 +4050,7 @@ "members":{ "Values":{ "shape":"ListOfString", - "documentation":"

    The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

    If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

    The total number of values must be less than or equal to five.

    " + "documentation":"

    The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

    If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use an HTTP header condition.

    The total number of values must be less than or equal to five.

    " } }, "documentation":"

    Information about a source IP condition.

    You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy, not the IP address of the client.

    " @@ -4180,8 +4132,7 @@ }, "SubnetNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified subnet does not exist.

    ", "error":{ "code":"SubnetNotFound", @@ -4374,8 +4325,7 @@ }, "TargetGroupAssociationLimitException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of load balancers per target group.

    ", "error":{ "code":"TargetGroupAssociationLimit", @@ -4389,7 +4339,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

    The name of the attribute.

    The following attributes are supported by all load balancers:

    • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

    • stickiness.enabled - Indicates whether target stickiness is enabled. The value is true or false. The default is false.

    • stickiness.type - Indicates the type of stickiness. The possible values are:

      • lb_cookie and app_cookie for Application Load Balancers.

      • source_ip for Network Load Balancers.

      • source_ip_dest_ip and source_ip_dest_ip_proto for Gateway Load Balancers.

    The following attributes are supported by Application Load Balancers and Network Load Balancers:

    • load_balancing.cross_zone.enabled - Indicates whether cross zone load balancing is enabled. The value is true, false or use_load_balancer_configuration. The default is use_load_balancer_configuration.

    • target_group_health.dns_failover.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to the maximum number of targets. The default is off.

    • target_group_health.dns_failover.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to 100. The default is off.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off or an integer from 1 to 100. The default is off.

    The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

    • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin, least_outstanding_requests, or weighted_random. The default is round_robin.

    • load_balancing.algorithm.anomaly_mitigation - Only available when load_balancing.algorithm.type is weighted_random. Indicates whether anomaly mitigation is enabled. The value is on or off. The default is off.

    • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

    • stickiness.app_cookie.cookie_name - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB, AWSALBAPP, and AWSALBTG; they're reserved for use by the load balancer.

    • stickiness.app_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

    • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

    The following attributes are supported only by Network Load Balancers:

    • deregistration_delay.connection_termination.enabled - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true or false. For new UDP/TCP_UDP target groups the default is true. Otherwise, the default is false.

    • preserve_client_ip.enabled - Indicates whether client IP preservation is enabled. The value is true or false. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.

    • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

    • target_health_state.unhealthy.connection_termination.enabled - Indicates whether the load balancer terminates connections to unhealthy targets. The value is true or false. The default is true. This attribute can't be enabled for UDP and TCP_UDP target groups.

    • target_health_state.unhealthy.draining_interval_seconds - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining to unhealthy. The range is 0-360000 seconds. The default value is 0 seconds.

      Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled is false.

    The following attributes are supported only by Gateway Load Balancers:

    • target_failover.on_deregistration - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    • target_failover.on_unhealthy - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    " + "documentation":"

    The name of the attribute.

    The following attributes are supported by all load balancers:

    • deregistration_delay.timeout_seconds - The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.

    • stickiness.enabled - Indicates whether target stickiness is enabled. The value is true or false. The default is false.

    • stickiness.type - Indicates the type of stickiness. The possible values are:

      • lb_cookie and app_cookie for Application Load Balancers.

      • source_ip for Network Load Balancers.

      • source_ip_dest_ip and source_ip_dest_ip_proto for Gateway Load Balancers.

    The following attributes are supported by Application Load Balancers and Network Load Balancers:

    • load_balancing.cross_zone.enabled - Indicates whether cross zone load balancing is enabled. The value is true, false or use_load_balancer_configuration. The default is use_load_balancer_configuration.

    • target_group_health.dns_failover.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to the maximum number of targets. The default is 1.

    • target_group_health.dns_failover.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off or an integer from 1 to 100. The default is off.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.count - The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.

    • target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage - The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off or an integer from 1 to 100. The default is off.

    The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

    • load_balancing.algorithm.type - The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin, least_outstanding_requests, or weighted_random. The default is round_robin.

    • load_balancing.algorithm.anomaly_mitigation - Only available when load_balancing.algorithm.type is weighted_random. Indicates whether anomaly mitigation is enabled. The value is on or off. The default is off.

    • slow_start.duration_seconds - The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).

    • stickiness.app_cookie.cookie_name - Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB, AWSALBAPP, and AWSALBTG; they're reserved for use by the load balancer.

    • stickiness.app_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    • stickiness.lb_cookie.duration_seconds - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).

    The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

    • lambda.multi_value_headers.enabled - Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true or false. The default is false. If the value is false and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.

    The following attributes are supported only by Network Load Balancers:

    • deregistration_delay.connection_termination.enabled - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true or false. For new UDP/TCP_UDP target groups the default is true. Otherwise, the default is false.

    • preserve_client_ip.enabled - Indicates whether client IP preservation is enabled. The value is true or false. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.

    • proxy_protocol_v2.enabled - Indicates whether Proxy Protocol version 2 is enabled. The value is true or false. The default is false.

    • target_health_state.unhealthy.connection_termination.enabled - Indicates whether the load balancer terminates connections to unhealthy targets. The value is true or false. The default is true. This attribute can't be enabled for UDP and TCP_UDP target groups.

    • target_health_state.unhealthy.draining_interval_seconds - The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining to unhealthy. The range is 0-360000 seconds. The default value is 0 seconds.

      Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled is false.

    The following attributes are supported only by Gateway Load Balancers:

    • target_failover.on_deregistration - Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    • target_failover.on_unhealthy - Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance and no_rebalance. The default is no_rebalance. The two attributes (target_failover.on_deregistration and target_failover.on_unhealthy) can't be set independently. The value you set for both attributes must be the same.

    " }, "Value":{ "shape":"TargetGroupAttributeValue", @@ -4426,8 +4376,7 @@ }, "TargetGroupNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified target group does not exist.

    ", "error":{ "code":"TargetGroupNotFound", @@ -4445,7 +4394,7 @@ }, "DurationSeconds":{ "shape":"TargetGroupStickinessDurationSeconds", - "documentation":"

    The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).

    " + "documentation":"

    The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). You must specify this value when enabling target group stickiness.

    " } }, "documentation":"

    Information about the target group stickiness for a rule.

    " @@ -4560,8 +4509,7 @@ }, "TooManyActionsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of actions per rule.

    ", "error":{ "code":"TooManyActions", @@ -4572,8 +4520,7 @@ }, "TooManyCertificatesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of certificates per load balancer.

    ", "error":{ "code":"TooManyCertificates", @@ -4584,8 +4531,7 @@ }, "TooManyListenersException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of listeners per load balancer.

    ", "error":{ "code":"TooManyListeners", @@ -4596,8 +4542,7 @@ }, "TooManyLoadBalancersException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of load balancers for your Amazon Web Services account.

    ", "error":{ "code":"TooManyLoadBalancers", @@ -4608,8 +4553,7 @@ }, "TooManyRegistrationsForTargetIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of times a target can be registered with a load balancer.

    ", "error":{ "code":"TooManyRegistrationsForTargetId", @@ -4620,8 +4564,7 @@ }, "TooManyRulesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of rules per load balancer.

    ", "error":{ "code":"TooManyRules", @@ -4632,8 +4575,7 @@ }, "TooManyTagsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of tags for this resource.

    ", "error":{ "code":"TooManyTags", @@ -4644,8 +4586,7 @@ }, "TooManyTargetGroupsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of target groups for your Amazon Web Services account.

    ", "error":{ "code":"TooManyTargetGroups", @@ -4656,8 +4597,7 @@ }, "TooManyTargetsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of targets.

    ", "error":{ "code":"TooManyTargets", @@ -4668,8 +4608,7 @@ }, "TooManyTrustStoreRevocationEntriesException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified trust store has too many revocation entries.

    ", "error":{ "code":"TooManyTrustStoreRevocationEntries", @@ -4680,8 +4619,7 @@ }, "TooManyTrustStoresException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of trust stores for your Amazon Web Services account.

    ", "error":{ "code":"TooManyTrustStores", @@ -4692,8 +4630,7 @@ }, "TooManyUniqueTargetGroupsPerLoadBalancerException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You've reached the limit on the number of unique target groups per load balancer across all listeners. If a target group is used by multiple actions for a load balancer, it is counted as only one use.

    ", "error":{ "code":"TooManyUniqueTargetGroupsPerLoadBalancer", @@ -4746,8 +4683,7 @@ }, "TrustStoreAssociationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified association does not exist.

    ", "error":{ "code":"AssociationNotFound", @@ -4770,8 +4706,7 @@ }, "TrustStoreInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified trust store is currently in use.

    ", "error":{ "code":"TrustStoreInUse", @@ -4792,8 +4727,7 @@ }, "TrustStoreNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified trust store does not exist.

    ", "error":{ "code":"TrustStoreNotFound", @@ -4804,8 +4738,7 @@ }, "TrustStoreNotReadyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified trust store is not active.

    ", "error":{ "code":"TrustStoreNotReady", @@ -4853,8 +4786,7 @@ }, "UnsupportedProtocolException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified protocol is not supported.

    ", "error":{ "code":"UnsupportedProtocol", @@ -4873,14 +4805,14 @@ }, "AvailabilityZone":{ "shape":"ZoneName", - "documentation":"

    Information about the availability zone.

    " + "documentation":"

    Information about the Availability Zone.

    " }, "EffectiveCapacityUnits":{ "shape":"CapacityUnitsDouble", "documentation":"

    The number of effective capacity units.

    " } }, - "documentation":"

    The capacity reservation status for each availability zone.

    " + "documentation":"

    The capacity reservation status for each Availability Zone.

    " }, "ZonalCapacityReservationStates":{ "type":"list", diff --git a/services/elasticsearch/pom.xml b/services/elasticsearch/pom.xml index 1d52dc15e3c4..f92512adc699 100644 --- a/services/elasticsearch/pom.xml +++ b/services/elasticsearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elasticsearch AWS Java SDK :: Services :: Amazon Elasticsearch Service diff --git a/services/elasticsearch/src/main/resources/codegen-resources/customization.config b/services/elasticsearch/src/main/resources/codegen-resources/customization.config index 0ec3e1d6f177..a2bee164c24e 100644 --- a/services/elasticsearch/src/main/resources/codegen-resources/customization.config +++ b/services/elasticsearch/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "listDomainNames", "listElasticsearchVersions" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/elastictranscoder/pom.xml b/services/elastictranscoder/pom.xml index 2ff872efd70d..f725dad0337a 100644 --- a/services/elastictranscoder/pom.xml +++ b/services/elastictranscoder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT elastictranscoder AWS Java SDK :: Services :: Amazon Elastic Transcoder diff --git a/services/elastictranscoder/src/main/resources/codegen-resources/customization.config b/services/elastictranscoder/src/main/resources/codegen-resources/customization.config index 34dd21d71fbc..4e9a028c968e 100644 --- a/services/elastictranscoder/src/main/resources/codegen-resources/customization.config +++ b/services/elastictranscoder/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "deprecatedOperations": [ "TestRole" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/emr/pom.xml 
b/services/emr/pom.xml index f105b4b8d4b8..82457de17f60 100644 --- a/services/emr/pom.xml +++ b/services/emr/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT emr AWS Java SDK :: Services :: Amazon EMR diff --git a/services/emr/src/main/resources/codegen-resources/customization.config b/services/emr/src/main/resources/codegen-resources/customization.config index 5a5a3d9ceca0..cf2c695c9123 100644 --- a/services/emr/src/main/resources/codegen-resources/customization.config +++ b/services/emr/src/main/resources/codegen-resources/customization.config @@ -22,6 +22,5 @@ "deprecatedOperations": [ "DescribeJobFlows" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/emr/src/main/resources/codegen-resources/service-2.json b/services/emr/src/main/resources/codegen-resources/service-2.json index d89226e5f28f..4ce89069c3dd 100644 --- a/services/emr/src/main/resources/codegen-resources/service-2.json +++ b/services/emr/src/main/resources/codegen-resources/service-2.json @@ -83,6 +83,20 @@ ], "documentation":"

    Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR releases 5.28.0 and later, you can cancel steps that are in a PENDING or RUNNING state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING state.

    " }, + "CreatePersistentAppUI":{ + "name":"CreatePersistentAppUI", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePersistentAppUIInput"}, + "output":{"shape":"CreatePersistentAppUIOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    Creates a persistent application user interface.

    " + }, "CreateSecurityConfiguration":{ "name":"CreateSecurityConfiguration", "http":{ @@ -206,6 +220,20 @@ ], "documentation":"

    Provides details of a notebook execution.

    " }, + "DescribePersistentAppUI":{ + "name":"DescribePersistentAppUI", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribePersistentAppUIInput"}, + "output":{"shape":"DescribePersistentAppUIOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    Describes a persistent application user interface.

    " + }, "DescribeReleaseLabel":{ "name":"DescribeReleaseLabel", "http":{ @@ -310,6 +338,34 @@ "output":{"shape":"GetManagedScalingPolicyOutput"}, "documentation":"

    Fetches the attached managed scaling policy for an Amazon EMR cluster.

    " }, + "GetOnClusterAppUIPresignedURL":{ + "name":"GetOnClusterAppUIPresignedURL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOnClusterAppUIPresignedURLInput"}, + "output":{"shape":"GetOnClusterAppUIPresignedURLOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    The presigned URL properties for the cluster's application user interface.

    " + }, + "GetPersistentAppUIPresignedURL":{ + "name":"GetPersistentAppUIPresignedURL", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPersistentAppUIPresignedURLInput"}, + "output":{"shape":"GetPersistentAppUIPresignedURLOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"

    The presigned URL properties for the cluster's application user interface.

    " + }, "GetStudioSessionMapping":{ "name":"GetStudioSessionMapping", "http":{ @@ -1553,6 +1609,45 @@ "type":"list", "member":{"shape":"Configuration"} }, + "CreatePersistentAppUIInput":{ + "type":"structure", + "required":["TargetResourceArn"], + "members":{ + "TargetResourceArn":{ + "shape":"ArnType", + "documentation":"

    The unique Amazon Resource Name (ARN) of the target resource.

    " + }, + "EMRContainersConfig":{ + "shape":"EMRContainersConfig", + "documentation":"

    The EMR containers configuration.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    Tags for the persistent application user interface.

    " + }, + "XReferer":{ + "shape":"String", + "documentation":"

    The cross reference for the persistent application user interface.

    " + }, + "ProfilerType":{ + "shape":"ProfilerType", + "documentation":"

    The profiler type for the persistent application user interface. Valid values are SHS, TEZUI, or YTS.

    " + } + } + }, + "CreatePersistentAppUIOutput":{ + "type":"structure", + "members":{ + "PersistentAppUIId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The persistent application user interface identifier.

    " + }, + "RuntimeRoleEnabledCluster":{ + "shape":"Boolean", + "documentation":"

    Represents if the EMR on EC2 cluster that the persistent application user interface is created for is a runtime role enabled cluster or not.

    " + } + } + }, "CreateSecurityConfigurationInput":{ "type":"structure", "required":[ @@ -1847,6 +1942,25 @@ } } }, + "DescribePersistentAppUIInput":{ + "type":"structure", + "required":["PersistentAppUIId"], + "members":{ + "PersistentAppUIId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The identifier for the persistent application user interface.

    " + } + } + }, + "DescribePersistentAppUIOutput":{ + "type":"structure", + "members":{ + "PersistentAppUI":{ + "shape":"PersistentAppUI", + "documentation":"

    The persistent application user interface.

    " + } + } + }, "DescribeReleaseLabelInput":{ "type":"structure", "members":{ @@ -1967,6 +2081,16 @@ "type":"list", "member":{"shape":"InstanceId"} }, + "EMRContainersConfig":{ + "type":"structure", + "members":{ + "JobRunId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The Job run ID for the container configuration.

    " + } + }, + "documentation":"

    The EMR container configuration.

    " + }, "EbsBlockDevice":{ "type":"structure", "members":{ @@ -2256,6 +2380,84 @@ } } }, + "GetOnClusterAppUIPresignedURLInput":{ + "type":"structure", + "required":["ClusterId"], + "members":{ + "ClusterId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The cluster ID associated with the cluster's application user interface presigned URL.

    " + }, + "OnClusterAppUIType":{ + "shape":"OnClusterAppUIType", + "documentation":"

    The application UI type associated with the cluster's application user interface presigned URL.

    " + }, + "ApplicationId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The application ID associated with the cluster's application user interface presigned URL.

    " + }, + "DryRun":{ + "shape":"BooleanObject", + "documentation":"

    Determines if the user interface presigned URL is for a dry run.

    " + }, + "ExecutionRoleArn":{ + "shape":"ArnType", + "documentation":"

    The execution role ARN associated with the cluster's application user interface presigned URL.

    " + } + } + }, + "GetOnClusterAppUIPresignedURLOutput":{ + "type":"structure", + "members":{ + "PresignedURLReady":{ + "shape":"Boolean", + "documentation":"

    Used to determine if the presigned URL is ready.

    " + }, + "PresignedURL":{ + "shape":"XmlString", + "documentation":"

    The cluster's generated presigned URL.

    " + } + } + }, + "GetPersistentAppUIPresignedURLInput":{ + "type":"structure", + "required":["PersistentAppUIId"], + "members":{ + "PersistentAppUIId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The persistent application user interface ID associated with the presigned URL.

    " + }, + "PersistentAppUIType":{ + "shape":"PersistentAppUIType", + "documentation":"

    The persistent application user interface type associated with the presigned URL.

    " + }, + "ApplicationId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The application ID associated with the presigned URL.

    " + }, + "AuthProxyCall":{ + "shape":"BooleanObject", + "documentation":"

    A boolean that represents if the caller is an authentication proxy call.

    " + }, + "ExecutionRoleArn":{ + "shape":"ArnType", + "documentation":"

    The execution role ARN associated with the presigned URL.

    " + } + } + }, + "GetPersistentAppUIPresignedURLOutput":{ + "type":"structure", + "members":{ + "PresignedURLReady":{ + "shape":"Boolean", + "documentation":"

    Used to determine if the presigned URL is ready.

    " + }, + "PresignedURL":{ + "shape":"XmlString", + "documentation":"

    The returned presigned URL.

    " + } + } + }, "GetStudioSessionMappingInput":{ "type":"structure", "required":[ @@ -4220,6 +4422,17 @@ "type":"list", "member":{"shape":"OSRelease"} }, + "OnClusterAppUIType":{ + "type":"string", + "enum":[ + "SparkHistoryServer", + "YarnTimelineService", + "TezUI", + "ApplicationMaster", + "JobHistoryServer", + "ResourceManager" + ] + }, "OnDemandCapacityReservationOptions":{ "type":"structure", "members":{ @@ -4323,6 +4536,56 @@ }, "documentation":"

    The Amazon S3 location that stores the notebook execution output.

    " }, + "PersistentAppUI":{ + "type":"structure", + "members":{ + "PersistentAppUIId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The identifier for the persistent application user interface object.

    " + }, + "PersistentAppUITypeList":{ + "shape":"PersistentAppUITypeList", + "documentation":"

    The type list for the persistent application user interface object. Valid values include SHS, YTS, or TEZ.

    " + }, + "PersistentAppUIStatus":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The status for the persistent application user interface object.

    " + }, + "AuthorId":{ + "shape":"XmlStringMaxLen256", + "documentation":"

    The author ID for the persistent application user interface object.

    " + }, + "CreationTime":{ + "shape":"Date", + "documentation":"

    The creation date and time for the persistent application user interface object.

    " + }, + "LastModifiedTime":{ + "shape":"Date", + "documentation":"

    The date and time the persistent application user interface object was last changed.

    " + }, + "LastStateChangeReason":{ + "shape":"XmlString", + "documentation":"

    The reason the persistent application user interface object was last changed.

    " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

    A collection of tags for the persistent application user interface object.

    " + } + }, + "documentation":"

    Holds persistent application user interface information. Applications installed on the Amazon EMR cluster publish user interfaces as web sites to monitor cluster activity.

    " + }, + "PersistentAppUIType":{ + "type":"string", + "enum":[ + "SHS", + "TEZ", + "YTS" + ] + }, + "PersistentAppUITypeList":{ + "type":"list", + "member":{"shape":"PersistentAppUIType"} + }, "PlacementGroupConfig":{ "type":"structure", "required":["InstanceRole"], @@ -4389,6 +4652,14 @@ "type":"list", "member":{"shape":"PortRange"} }, + "ProfilerType":{ + "type":"string", + "enum":[ + "SHS", + "TEZUI", + "YTS" + ] + }, "PutAutoScalingPolicyInput":{ "type":"structure", "required":[ diff --git a/services/emrcontainers/pom.xml b/services/emrcontainers/pom.xml index aad6af0a665e..d78d7f96a05e 100644 --- a/services/emrcontainers/pom.xml +++ b/services/emrcontainers/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT emrcontainers AWS Java SDK :: Services :: EMR Containers diff --git a/services/emrcontainers/src/main/resources/codegen-resources/customization.config b/services/emrcontainers/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/emrcontainers/src/main/resources/codegen-resources/customization.config +++ b/services/emrcontainers/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/emrserverless/pom.xml b/services/emrserverless/pom.xml index 1cedc762298d..732a72faba6b 100644 --- a/services/emrserverless/pom.xml +++ b/services/emrserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT emrserverless AWS Java SDK :: Services :: EMR Serverless diff --git a/services/emrserverless/src/main/resources/codegen-resources/customization.config b/services/emrserverless/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/emrserverless/src/main/resources/codegen-resources/customization.config +++ b/services/emrserverless/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/emrserverless/src/main/resources/codegen-resources/service-2.json b/services/emrserverless/src/main/resources/codegen-resources/service-2.json index d32f0d655d96..3f3be64c4b75 100644 --- a/services/emrserverless/src/main/resources/codegen-resources/service-2.json +++ b/services/emrserverless/src/main/resources/codegen-resources/service-2.json @@ -24,8 +24,8 @@ "output":{"shape":"CancelJobRunResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Cancels a job run.

    ", "idempotent":true @@ -41,8 +41,8 @@ "output":{"shape":"CreateApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], "documentation":"

    Creates an application.

    ", @@ -59,8 +59,8 @@ "output":{"shape":"DeleteApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Deletes an application. An application has to be in a stopped or created state in order to be deleted.

    ", "idempotent":true @@ -76,8 +76,8 @@ "output":{"shape":"GetApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Displays detailed information about a specified application.

    " }, @@ -108,8 +108,8 @@ "output":{"shape":"GetJobRunResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Displays detailed information about a job run.

    " }, @@ -139,8 +139,8 @@ "output":{"shape":"ListJobRunAttemptsResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Lists all attempts of a job run.

    " }, @@ -170,8 +170,8 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Lists the tags assigned to the resources.

    " }, @@ -186,8 +186,8 @@ "output":{"shape":"StartApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ServiceQuotaExceededException"} ], "documentation":"

    Starts a specified application and initializes initial capacity if configured.

    ", @@ -222,8 +222,8 @@ "output":{"shape":"StopApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Stops a specified application and releases initial capacity if configured. All scheduled and running jobs must be completed or cancelled before stopping an application.

    ", "idempotent":true @@ -239,8 +239,8 @@ "output":{"shape":"TagResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Assigns tags to resources. A tag is a label that you assign to an Amazon Web Services resource. Each tag consists of a key and an optional value, both of which you define. Tags enable you to categorize your Amazon Web Services resources by attributes such as purpose, owner, or environment. When you have many resources of the same type, you can quickly identify a specific resource based on the tags you've assigned to it.

    " }, @@ -255,8 +255,8 @@ "output":{"shape":"UntagResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Removes tags from resources.

    ", "idempotent":true @@ -272,8 +272,8 @@ "output":{"shape":"UpdateApplicationResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

    Updates a specified application. An application has to be in a stopped or created state in order to be updated.

    " } @@ -375,6 +375,10 @@ "schedulerConfiguration":{ "shape":"SchedulerConfiguration", "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " + }, + "identityCenterConfiguration":{ + "shape":"IdentityCenterConfiguration", + "documentation":"

    The IAM Identity Center configuration applied to enable trusted identity propagation.

    " } }, "documentation":"

    Information about an application. Amazon EMR Serverless uses applications to run jobs.

    " @@ -481,6 +485,12 @@ "X86_64" ] }, + "Arn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"([ -~… -퟿-�က0-ჿFF]+)" + }, "AttemptNumber":{ "type":"integer", "box":true, @@ -538,6 +548,12 @@ "documentation":"

    The ID of the job run to cancel.

    ", "location":"uri", "locationName":"jobRunId" + }, + "shutdownGracePeriodInSeconds":{ + "shape":"ShutdownGracePeriodInSeconds", + "documentation":"

    The duration in seconds to wait before forcefully terminating the job after cancellation is requested.

    ", + "location":"querystring", + "locationName":"shutdownGracePeriodInSeconds" } } }, @@ -737,6 +753,10 @@ "schedulerConfiguration":{ "shape":"SchedulerConfiguration", "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " + }, + "identityCenterConfiguration":{ + "shape":"IdentityCenterConfigurationInput", + "documentation":"

    The IAM Identity Center Configuration accepts the Identity Center instance parameter required to enable trusted identity propagation. This configuration allows identity propagation between integrated services and the Identity Center instance.

    " } } }, @@ -811,14 +831,15 @@ }, "EntryPointArgument":{ "type":"string", - "max":10280, "min":1, "pattern":".*\\S.*", "sensitive":true }, "EntryPointArguments":{ "type":"list", - "member":{"shape":"EntryPointArgument"} + "member":{"shape":"EntryPointArgument"}, + "max":1024, + "min":0 }, "EntryPointPath":{ "type":"string", @@ -960,6 +981,40 @@ "min":20, "pattern":"arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)" }, + "IdentityCenterApplicationArn":{ + "type":"string", + "pattern":"arn:(aws[a-zA-Z0-9-]*):sso::\\d{12}:application/(sso)?ins-[a-zA-Z0-9-.]{16}/apl-[a-zA-Z0-9]{16}" + }, + "IdentityCenterConfiguration":{ + "type":"structure", + "members":{ + "identityCenterInstanceArn":{ + "shape":"IdentityCenterInstanceArn", + "documentation":"

    The ARN of the IAM Identity Center instance.

    " + }, + "identityCenterApplicationArn":{ + "shape":"IdentityCenterApplicationArn", + "documentation":"

    The ARN of the EMR Serverless created IAM Identity Center Application that provides trusted-identity propagation.

    " + } + }, + "documentation":"

    The IAM Identity Center Configuration that includes the Identity Center instance and application ARNs that provide trusted-identity propagation.

    " + }, + "IdentityCenterConfigurationInput":{ + "type":"structure", + "members":{ + "identityCenterInstanceArn":{ + "shape":"IdentityCenterInstanceArn", + "documentation":"

    The ARN of the IAM Identity Center instance.

    " + } + }, + "documentation":"

    Specifies the IAM Identity Center configuration used to enable or disable trusted identity propagation. When provided, this configuration determines how the application interacts with IAM Identity Center for user authentication and access control.

    " + }, + "IdentityCenterInstanceArn":{ + "type":"string", + "max":1024, + "min":10, + "pattern":"arn:(aws[a-zA-Z0-9-]*):sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" + }, "ImageConfiguration":{ "type":"structure", "required":["imageUri"], @@ -1122,6 +1177,7 @@ "shape":"IAMRoleArn", "documentation":"

    The execution role ARN of the job run.

    " }, + "executionIamPolicy":{"shape":"JobRunExecutionIamPolicy"}, "state":{ "shape":"JobRunState", "documentation":"

    The state of the job run.

    " @@ -1282,6 +1338,20 @@ "type":"list", "member":{"shape":"JobRunAttemptSummary"} }, + "JobRunExecutionIamPolicy":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"PolicyDocument", + "documentation":"

    An IAM inline policy to use as an execution IAM policy.

    " + }, + "policyArns":{ + "shape":"PolicyArnList", + "documentation":"

    A list of Amazon Resource Names (ARNs) to use as an execution IAM policy.

    " + } + }, + "documentation":"

    Optional IAM policy. The resulting job IAM role permissions will be an intersection of the policies passed and the policy associated with your job execution role.

    " + }, "JobRunId":{ "type":"string", "max":64, @@ -1707,6 +1777,18 @@ "min":1, "pattern":"[A-Za-z0-9_=-]+" }, + "PolicyArnList":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":10, + "min":0 + }, + "PolicyDocument":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"([ -ÿ]+)" + }, "PrometheusMonitoringConfiguration":{ "type":"structure", "members":{ @@ -1859,6 +1941,10 @@ }, "exception":true }, + "ShutdownGracePeriodInSeconds":{ + "type":"integer", + "box":true + }, "SparkSubmit":{ "type":"structure", "required":["entryPoint"], @@ -1925,6 +2011,10 @@ "shape":"IAMRoleArn", "documentation":"

    The execution role ARN for the job run.

    " }, + "executionIamPolicy":{ + "shape":"JobRunExecutionIamPolicy", + "documentation":"

    You can pass an optional IAM policy. The resulting job IAM role permissions will be an intersection of this policy and the policy associated with your job execution role.

    " + }, "jobDriver":{ "shape":"JobDriver", "documentation":"

    The job driver for the job run.

    " @@ -2178,6 +2268,10 @@ "schedulerConfiguration":{ "shape":"SchedulerConfiguration", "documentation":"

    The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

    " + }, + "identityCenterConfiguration":{ + "shape":"IdentityCenterConfigurationInput", + "documentation":"

    Specifies the IAM Identity Center configuration used to enable or disable trusted identity propagation. When provided, this configuration determines how the application interacts with IAM Identity Center for user authentication and access control.

    " } } }, diff --git a/services/entityresolution/pom.xml b/services/entityresolution/pom.xml index 0f727e7848bc..9c3003f4966f 100644 --- a/services/entityresolution/pom.xml +++ b/services/entityresolution/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT entityresolution AWS Java SDK :: Services :: Entity Resolution diff --git a/services/entityresolution/src/main/resources/codegen-resources/customization.config b/services/entityresolution/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/entityresolution/src/main/resources/codegen-resources/customization.config +++ b/services/entityresolution/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/entityresolution/src/main/resources/codegen-resources/service-2.json b/services/entityresolution/src/main/resources/codegen-resources/service-2.json index c26c75e970f2..33a3398c060b 100644 --- a/services/entityresolution/src/main/resources/codegen-resources/service-2.json +++ b/services/entityresolution/src/main/resources/codegen-resources/service-2.json @@ -222,6 +222,24 @@ "documentation":"

    Deletes the SchemaMapping with a given name. This operation will succeed even if a schema with the given name does not exist. This operation will fail if there is a MatchingWorkflow object that references the SchemaMapping in the workflow's InputSourceConfig.

    ", "idempotent":true }, + "GenerateMatchId":{ + "name":"GenerateMatchId", + "http":{ + "method":"POST", + "requestUri":"/matchingworkflows/{workflowName}/generateMatches", + "responseCode":200 + }, + "input":{"shape":"GenerateMatchIdInput"}, + "output":{"shape":"GenerateMatchIdOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Generates or retrieves Match IDs for records using a rule-based matching workflow. When you call this operation, it processes your records against the workflow's matching rules to identify potential matches. For existing records, it retrieves their Match IDs and associated rules. For records without matches, it generates new Match IDs. The operation saves results to Amazon S3.

    The processing type (processingType) you choose affects both the accuracy and response time of the operation. Additional charges apply for each API call, whether made through the Entity Resolution console or directly via the API. The rule-based matching workflow must exist and be active before calling this operation.

    " + }, "GetIdMappingJob":{ "name":"GetIdMappingJob", "http":{ @@ -238,7 +256,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

    Gets the status, metrics, and errors (if there are any) that are associated with a job.

    " + "documentation":"

    Returns the status, metrics, and errors (if there are any) that are associated with a job.

    " }, "GetIdMappingWorkflow":{ "name":"GetIdMappingWorkflow", @@ -310,7 +328,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

    Gets the status, metrics, and errors (if there are any) that are associated with a job.

    " + "documentation":"

    Returns the status, metrics, and errors (if there are any) that are associated with a job.

    " }, "GetMatchingWorkflow":{ "name":"GetMatchingWorkflow", @@ -1412,6 +1430,83 @@ }, "exception":true }, + "FailedRecord":{ + "type":"structure", + "required":[ + "inputSourceARN", + "uniqueId", + "errorMessage" + ], + "members":{ + "inputSourceARN":{ + "shape":"FailedRecordInputSourceARNString", + "documentation":"

    The input source ARN of the record that didn't generate a Match ID.

    " + }, + "uniqueId":{ + "shape":"String", + "documentation":"

    The unique ID of the record that didn't generate a Match ID.

    " + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

    The error message for the record that didn't generate a Match ID.

    " + } + }, + "documentation":"

    The record that didn't generate a Match ID.

    " + }, + "FailedRecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, + "FailedRecordsList":{ + "type":"list", + "member":{"shape":"FailedRecord"} + }, + "GenerateMatchIdInput":{ + "type":"structure", + "required":[ + "workflowName", + "records" + ], + "members":{ + "workflowName":{ + "shape":"EntityName", + "documentation":"

    The name of the rule-based matching workflow.

    ", + "location":"uri", + "locationName":"workflowName" + }, + "records":{ + "shape":"GenerateMatchIdInputRecordsList", + "documentation":"

    The records to match.

    " + }, + "processingType":{ + "shape":"ProcessingType", + "documentation":"

    The processing mode that determines how Match IDs are generated and results are saved. Each mode provides different levels of accuracy, response time, and completeness of results.

    If not specified, defaults to CONSISTENT.

    CONSISTENT: Performs immediate lookup and matching against all existing records, with results saved synchronously. Provides highest accuracy but slower response time.

    EVENTUAL (shown as Background in the console): Performs initial match ID lookup or generation immediately, with record updates processed asynchronously in the background. Offers faster initial response time, with complete matching results available later in S3.

    EVENTUAL_NO_LOOKUP (shown as Quick ID generation in the console): Generates new match IDs without checking existing matches, with updates processed asynchronously. Provides fastest response time but should only be used for records known to be unique.

    " + } + } + }, + "GenerateMatchIdInputRecordsList":{ + "type":"list", + "member":{"shape":"Record"}, + "max":1, + "min":1 + }, + "GenerateMatchIdOutput":{ + "type":"structure", + "required":[ + "matchGroups", + "failedRecords" + ], + "members":{ + "matchGroups":{ + "shape":"MatchGroupsList", + "documentation":"

    The match groups from the generated match ID.

    " + }, + "failedRecords":{ + "shape":"FailedRecordsList", + "documentation":"

    The records that didn't receive a generated Match ID.

    " + } + } + }, "GetIdMappingJobInput":{ "type":"structure", "required":[ @@ -1664,7 +1759,7 @@ "members":{ "jobId":{ "shape":"JobId", - "documentation":"

    The ID of the job.

    " + "documentation":"

    The unique identifier of the matching job.

    " }, "status":{ "shape":"JobStatus", @@ -1921,7 +2016,7 @@ }, "mappedInputFields":{ "shape":"SchemaInputAttributes", - "documentation":"

    A list of MappedInputFields. Each MappedInputField corresponds to a column the source data table, and contains column name plus additional information Venice uses for matching.

    " + "documentation":"

    A list of MappedInputFields. Each MappedInputField corresponds to a column in the source data table, and contains column name plus additional information Entity Resolution uses for matching.

    " }, "createdAt":{ "shape":"Timestamp", @@ -2754,6 +2849,33 @@ } } }, + "MatchGroup":{ + "type":"structure", + "required":[ + "records", + "matchId", + "matchRule" + ], + "members":{ + "records":{ + "shape":"MatchedRecordsList", + "documentation":"

    The matched records.

    " + }, + "matchId":{ + "shape":"String", + "documentation":"

    The match ID.

    " + }, + "matchRule":{ + "shape":"String", + "documentation":"

    The match rule of the match group.

    " + } + }, + "documentation":"

    The match group.

    " + }, + "MatchGroupsList":{ + "type":"list", + "member":{"shape":"MatchGroup"} + }, "MatchPurpose":{ "type":"string", "enum":[ @@ -2761,6 +2883,32 @@ "INDEXING" ] }, + "MatchedRecord":{ + "type":"structure", + "required":[ + "inputSourceARN", + "recordId" + ], + "members":{ + "inputSourceARN":{ + "shape":"MatchedRecordInputSourceARNString", + "documentation":"

    The input source ARN of the matched record.

    " + }, + "recordId":{ + "shape":"String", + "documentation":"

    The record ID of the matched record.

    " + } + }, + "documentation":"

    The matched record.

    " + }, + "MatchedRecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, + "MatchedRecordsList":{ + "type":"list", + "member":{"shape":"MatchedRecord"} + }, "MatchingWorkflowArn":{ "type":"string", "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})" @@ -2915,6 +3063,14 @@ "min":36, "pattern":"[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" }, + "ProcessingType":{ + "type":"string", + "enum":[ + "CONSISTENT", + "EVENTUAL", + "EVENTUAL_NO_LOOKUP" + ] + }, "ProviderComponentSchema":{ "type":"structure", "members":{ @@ -3141,6 +3297,29 @@ } } }, + "Record":{ + "type":"structure", + "required":[ + "inputSourceARN", + "uniqueId", + "recordAttributeMap" + ], + "members":{ + "inputSourceARN":{ + "shape":"RecordInputSourceARNString", + "documentation":"

    The input source ARN of the record.

    " + }, + "uniqueId":{ + "shape":"UniqueId", + "documentation":"

    The unique ID of the record.

    " + }, + "recordAttributeMap":{ + "shape":"RecordAttributeMapString255", + "documentation":"

    The record's attribute map.

    " + } + }, + "documentation":"

    The record.

    " + }, "RecordAttributeMap":{ "type":"map", "key":{"shape":"RecordAttributeMapKeyString"}, @@ -3153,12 +3332,32 @@ "min":0, "pattern":"[a-zA-Z_0-9- \\t]*" }, + "RecordAttributeMapString255":{ + "type":"map", + "key":{"shape":"RecordAttributeMapString255KeyString"}, + "value":{"shape":"RecordAttributeMapString255ValueString"}, + "sensitive":true + }, + "RecordAttributeMapString255KeyString":{ + "type":"string", + "max":255, + "min":0 + }, + "RecordAttributeMapString255ValueString":{ + "type":"string", + "max":255, + "min":0 + }, "RecordAttributeMapValueString":{ "type":"string", "max":255, "min":0, "pattern":"[a-zA-Z_0-9-./@ ()+\\t]*" }, + "RecordInputSourceARNString":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(idnamespace/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):entityresolution:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(matchingworkflow/[a-zA-Z_0-9-]{1,255})$|^arn:(aws|aws-us-gov|aws-cn):glue:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(table/[a-zA-Z_0-9-]{1,255}/[a-zA-Z_0-9-]{1,255})" + }, "RecordMatchingModel":{ "type":"string", "enum":[ @@ -3561,6 +3760,12 @@ "retryable":{"throttling":true} }, "Timestamp":{"type":"timestamp"}, + "UniqueId":{ + "type":"string", + "max":38, + "min":1, + "pattern":"[a-zA-Z0-9_-]*" + }, "UniqueIdList":{ "type":"list", "member":{"shape":"HeaderSafeUniqueId"} @@ -3812,7 +4017,7 @@ }, "resolutionTechniques":{ "shape":"ResolutionTechniques", - "documentation":"

    An object which defines the resolutionType and the ruleBasedProperties

    " + "documentation":"

    An object which defines the resolutionType and the ruleBasedProperties.

    " }, "incrementalRunConfig":{ "shape":"IncrementalRunConfig", diff --git a/services/eventbridge/pom.xml b/services/eventbridge/pom.xml index 696170858081..a3af2e5a73b1 100644 --- a/services/eventbridge/pom.xml +++ b/services/eventbridge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT eventbridge AWS Java SDK :: Services :: EventBridge diff --git a/services/eventbridge/src/main/resources/codegen-resources/customization.config b/services/eventbridge/src/main/resources/codegen-resources/customization.config index 4dba6cf43c06..4aa0caa705ce 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/customization.config +++ b/services/eventbridge/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "allowedEndpointAuthSchemeParams": [ "EndpointId" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json index 46c93ffd489f..c47befc2231e 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/eventbridge/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -34,6 +34,79 @@ } }, "rules": [ + { + "conditions": [ + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + }, + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": 
"UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://events.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { diff --git a/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json b/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json index 3655193a0a97..28c6f2091780 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/eventbridge/src/main/resources/codegen-resources/endpoint-tests.json @@ -442,19 +442,6 @@ "UseDualStack": false } }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, { "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { @@ -468,32 +455,6 @@ "UseDualStack": false } }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://events.us-gov-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://events-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, { "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { @@ -816,15 +777,107 @@ } }, { - "documentation": "Valid EndpointId with DualStack enabled and partition does not support DualStack", + "documentation": "legacy fips endpoint @ us-gov-east-1", "expect": { - "error": "DualStack is enabled but this partition does not 
support DualStack" + "endpoint": { + "url": "https://events.us-gov-east-1.amazonaws.com" + } }, "params": { - "EndpointId": "abc123.456def", + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "legacy non-fips endpoint @ us-gov-east-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Dualstack fips endpoint @ us-gov-east-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", "UseDualStack": true, - "UseFIPS": false, - "Region": "us-isob-east-1" + "UseFIPS": true + } + }, + { + "documentation": "Dualstack non-fips endpoint @ us-gov-east-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "legacy fips endpoint @ us-gov-west-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseDualStack": false, + "UseFIPS": true + } + }, + { + "documentation": "legacy non-fips endpoint @ us-gov-west-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Dualstack fips endpoint @ us-gov-west-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseDualStack": true, + "UseFIPS": true + } + }, + { + "documentation": "Dualstack non-fips endpoint @ us-gov-west-1", + "expect": { + "endpoint": { + "url": "https://events.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseDualStack": 
true, + "UseFIPS": false } } ], diff --git a/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json b/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json +++ b/services/eventbridge/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/eventbridge/src/main/resources/codegen-resources/service-2.json b/services/eventbridge/src/main/resources/codegen-resources/service-2.json index e25c3b98f4b3..0b6e47877f20 100644 --- a/services/eventbridge/src/main/resources/codegen-resources/service-2.json +++ b/services/eventbridge/src/main/resources/codegen-resources/service-2.json @@ -877,8 +877,7 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    You do not have the necessary permissions for this action.

    ", "exception":true }, @@ -1234,8 +1233,7 @@ }, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    There is concurrent modification on a rule, target, archive, or replay.

    ", "exception":true }, @@ -2040,8 +2038,7 @@ }, "DeleteApiDestinationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteArchiveRequest":{ "type":"structure", @@ -2055,8 +2052,7 @@ }, "DeleteArchiveResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConnectionRequest":{ "type":"structure", @@ -2105,8 +2101,7 @@ }, "DeleteEndpointResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventBusRequest":{ "type":"structure", @@ -2938,7 +2933,7 @@ "type":"string", "max":1600, "min":1, - "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" + "pattern":"(arn:aws[\\w-]*:events:[a-z]+-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[/\\.\\-_A-Za-z0-9]+" }, "EventId":{ "type":"string", @@ -3100,8 +3095,7 @@ }, "IllegalStatusException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An error occurred because a replay can be canceled only when the state is Running or Starting.

    ", "exception":true }, @@ -3129,23 +3123,20 @@ "Integer":{"type":"integer"}, "InternalException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This exception occurs due to unexpected causes.

    ", "exception":true, "fault":true }, "InvalidEventPatternException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event pattern is not valid.

    ", "exception":true }, "InvalidStateException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The specified state is not a valid state for an event source.

    ", "exception":true }, @@ -3175,8 +3166,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The request failed because it attempted to create resource beyond the allowed service quota.

    ", "exception":true }, @@ -3620,8 +3610,7 @@ }, "ManagedRuleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This rule was created by an Amazon Web Services service on behalf of your account. It is managed by that service. If you see this error in response to DeleteRule or RemoveTargets, you can use the Force parameter in those calls to delete the rule or remove targets from the rule. You cannot modify these managed rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, or UntagResource.

    ", "exception":true }, @@ -3658,7 +3647,7 @@ "type":"string", "max":512, "min":1, - "pattern":"^arn:aws[a-z-]*:events:[a-z]{2}-[a-z-]+-\\d+:\\d{12}:event-bus/[\\w.-]+$" + "pattern":"^arn:aws[a-z-]*:events:[a-z]+-[a-z-]+-\\d+:\\d{12}:event-bus/[\\w.-]+$" }, "NonPartnerEventBusName":{ "type":"string", @@ -3670,12 +3659,11 @@ "type":"string", "max":1600, "min":1, - "pattern":"(arn:aws[\\w-]*:events:[a-z]{2}-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" + "pattern":"(arn:aws[\\w-]*:events:[a-z]+-[a-z]+-[\\w-]+:[0-9]{12}:event-bus\\/)?[\\.\\-_A-Za-z0-9]+" }, "OperationDisabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The operation you are attempting is not available in this region.

    ", "exception":true }, @@ -3800,8 +3788,7 @@ }, "PolicyLengthExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The event bus policy is too long. For more information, see the limits.

    ", "exception":true }, @@ -4386,8 +4373,7 @@ }, "ResourceAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The resource you are trying to create already exists.

    ", "exception":true }, @@ -4410,8 +4396,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    An entity that you specified does not exist.

    ", "exception":true }, @@ -4798,8 +4783,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4947,8 +4931,7 @@ }, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    This request cannot be completed due to throttling issues.

    ", "exception":true }, @@ -4988,8 +4971,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateApiDestinationRequest":{ "type":"structure", diff --git a/services/evidently/pom.xml b/services/evidently/pom.xml index ec8cc5273b97..fd898634a7fa 100644 --- a/services/evidently/pom.xml +++ b/services/evidently/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT evidently AWS Java SDK :: Services :: Evidently diff --git a/services/evidently/src/main/resources/codegen-resources/customization.config b/services/evidently/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/evidently/src/main/resources/codegen-resources/customization.config +++ b/services/evidently/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/evs/pom.xml b/services/evs/pom.xml new file mode 100644 index 000000000000..cae85b4f3aec --- /dev/null +++ b/services/evs/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.76-SNAPSHOT + + evs + AWS Java SDK :: Services :: Evs + The AWS Java SDK for Evs module holds the client classes that are used for + communicating with Evs. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.evs + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..a5d16a60c9dd --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://evs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://evs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://evs.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://evs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/evs/src/main/resources/codegen-resources/endpoint-tests.json b/services/evs/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..7555a890f574 --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": 
true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://evs.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": 
"For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://evs.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/evs/src/main/resources/codegen-resources/paginators-1.json b/services/evs/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..c5e0850aedbf --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + 
"ListEnvironmentHosts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentHosts" + }, + "ListEnvironmentVlans": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentVlans" + }, + "ListEnvironments": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "environmentSummaries" + } + } +} diff --git a/services/evs/src/main/resources/codegen-resources/service-2.json b/services/evs/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..78f3105f0a4f --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1356 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2023-07-27", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"evs", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + "serviceAbbreviation":"EVS", + "serviceFullName":"Amazon Elastic VMware Service", + "serviceId":"evs", + "signatureVersion":"v4", + "signingName":"evs", + "targetPrefix":"AmazonElasticVMwareService", + "uid":"evs-2023-07-27" + }, + "operations":{ + "CreateEnvironment":{ + "name":"CreateEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentRequest"}, + "output":{"shape":"CreateEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Creates an Amazon EVS environment that runs VCF software, such as SDDC Manager, NSX Manager, and vCenter Server.

    During environment creation, Amazon EVS performs validations on DNS settings, provisions VLAN subnets and hosts, and deploys the supplied version of VCF.

    It can take several hours to create an environment. After the deployment completes, you can configure VCF according to your unique requirements.

    You cannot use the dedicatedHostId and placementGroupId parameters together in the same CreateEnvironment action. This results in a ValidationException response.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

    ", + "idempotent":true + }, + "CreateEnvironmentHost":{ + "name":"CreateEnvironmentHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateEnvironmentHostRequest"}, + "output":{"shape":"CreateEnvironmentHostResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

    Creates an ESXi host and adds it to an Amazon EVS environment. Amazon EVS supports 4-16 hosts per environment.

    This action can only be used after the Amazon EVS environment is deployed. All Amazon EVS hosts are created with the latest AMI release version for the respective VCF version of the environment.

    You can use the dedicatedHostId parameter to specify an Amazon EC2 Dedicated Host for ESXi host creation.

    You can use the placementGroupId parameter to specify a cluster or partition placement group to launch EC2 instances into.

    You cannot use the dedicatedHostId and placementGroupId parameters together in the same CreateEnvironmentHost action. This results in a ValidationException response.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

    ", + "idempotent":true + }, + "DeleteEnvironment":{ + "name":"DeleteEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentRequest"}, + "output":{"shape":"DeleteEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes an Amazon EVS environment.

    Amazon EVS environments will only be enabled for deletion once the hosts are deleted. You can delete hosts using the DeleteEnvironmentHost action.

    Environment deletion also deletes the associated Amazon EVS VLAN subnets. Other associated Amazon Web Services resources are not deleted. These resources may continue to incur costs.

    ", + "idempotent":true + }, + "DeleteEnvironmentHost":{ + "name":"DeleteEnvironmentHost", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteEnvironmentHostRequest"}, + "output":{"shape":"DeleteEnvironmentHostResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Deletes a host from an Amazon EVS environment.

    Before deleting a host, you must unassign and decommission the host from within the SDDC Manager user interface. Not doing so could impact the availability of your virtual machines or result in data loss.

    ", + "idempotent":true + }, + "GetEnvironment":{ + "name":"GetEnvironment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetEnvironmentRequest"}, + "output":{"shape":"GetEnvironmentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Returns a description of the specified environment.

    " + }, + "ListEnvironmentHosts":{ + "name":"ListEnvironmentHosts", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentHostsRequest"}, + "output":{"shape":"ListEnvironmentHostsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    List the hosts within an environment.

    " + }, + "ListEnvironmentVlans":{ + "name":"ListEnvironmentVlans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentVlansRequest"}, + "output":{"shape":"ListEnvironmentVlansResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists environment VLANs that are associated with the specified environment.

    " + }, + "ListEnvironments":{ + "name":"ListEnvironments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListEnvironmentsRequest"}, + "output":{"shape":"ListEnvironmentsResponse"}, + "errors":[ + {"shape":"ValidationException"} + ], + "documentation":"

    Lists the Amazon EVS environments in your Amazon Web Services account in the specified Amazon Web Services Region.

    " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

    Lists the tags for an Amazon EVS resource.

    " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"TagPolicyException"} + ], + "documentation":"

    Associates the specified tags to an Amazon EVS resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they aren't changed. When a resource is deleted, the tags associated with that resource are also deleted. Tags that you create for Amazon EVS resources don't propagate to any other resources associated with the environment. For example, if you tag an environment with this operation, that tag doesn't automatically propagate to the VLAN subnets and hosts associated with the environment.

    ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"TagPolicyException"} + ], + "documentation":"

    Deletes specified tags from an Amazon EVS resource.

    ", + "idempotent":true + } + }, + "shapes":{ + "Arn":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"arn:aws:evs:[a-z]{2}-[a-z]+-[0-9]:[0-9]{12}:environment/[a-zA-Z0-9_-]+" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Check":{ + "type":"structure", + "members":{ + "type":{ + "shape":"CheckType", + "documentation":"

    The check type. Amazon EVS performs the following checks.

    • KEY_REUSE: checks that the VCF license key is not used by another Amazon EVS environment. This check fails if a used license is added to the environment.

    • KEY_COVERAGE: checks that your VCF license key allocates sufficient vCPU cores for all deployed hosts. The check fails when any assigned hosts in the EVS environment are not covered by license keys, or when any unassigned hosts cannot be covered by available vCPU cores in keys.

    • REACHABILITY: checks that the Amazon EVS control plane has a persistent connection to SDDC Manager. If Amazon EVS cannot reach the environment, this check fails.

    • HOST_COUNT: Checks that your environment has a minimum of 4 hosts, which is a requirement for VCF 5.2.1.

      If this check fails, you will need to add hosts so that your environment meets this minimum requirement. Amazon EVS only supports environments with 4-16 hosts.

    " + }, + "result":{ + "shape":"CheckResult", + "documentation":"

    The check result.

    " + }, + "impairedSince":{ + "shape":"Timestamp", + "documentation":"

    The time when environment health began to be impaired.

    " + } + }, + "documentation":"

    A check on the environment to identify environment health and validate VMware VCF licensing compliance.

    " + }, + "CheckResult":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "UNKNOWN" + ] + }, + "CheckType":{ + "type":"string", + "enum":[ + "KEY_REUSE", + "KEY_COVERAGE", + "REACHABILITY", + "HOST_COUNT" + ] + }, + "ChecksList":{ + "type":"list", + "member":{"shape":"Check"} + }, + "Cidr":{ + "type":"string", + "pattern":"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(3[0-2]|[1-2][0-9]|[0-9])" + }, + "ClientToken":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[!-~]+" + }, + "ConnectivityInfo":{ + "type":"structure", + "required":["privateRouteServerPeerings"], + "members":{ + "privateRouteServerPeerings":{ + "shape":"RouteServerPeeringList", + "documentation":"

    The unique IDs for private route server peers.

    " + } + }, + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX uplink VLAN for connectivity to the NSX overlay network.

    " + }, + "CreateEnvironmentHostRequest":{ + "type":"structure", + "required":[ + "environmentId", + "host" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the host creation request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment that the host is added to.

    " + }, + "host":{ + "shape":"HostInfoForCreate", + "documentation":"

    The host that is created and added to the environment.

    " + } + } + }, + "CreateEnvironmentHostResponse":{ + "type":"structure", + "members":{ + "environmentSummary":{ + "shape":"EnvironmentSummary", + "documentation":"

    A summary of the environment that the host is created in.

    " + }, + "host":{ + "shape":"Host", + "documentation":"

    A description of the created host.

    " + } + } + }, + "CreateEnvironmentRequest":{ + "type":"structure", + "required":[ + "vpcId", + "serviceAccessSubnetId", + "vcfVersion", + "termsAccepted", + "licenseInfo", + "initialVlans", + "hosts", + "connectivityInfo", + "vcfHostnames", + "siteId" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the environment creation request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name to give to your environment. The name can contain only alphanumeric characters (case-sensitive), hyphens, and underscores. It must start with an alphanumeric character, and can't be longer than 100 characters. The name must be unique within the Amazon Web Services Region and Amazon Web Services account that you're creating the environment in.

    " + }, + "kmsKeyId":{ + "shape":"String", + "documentation":"

    A unique ID for the customer-managed KMS key that is used to encrypt the VCF credential pairs for SDDC Manager, NSX Manager, and vCenter appliances. These credentials are stored in Amazon Web Services Secrets Manager.

    " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources.

    " + }, + "serviceAccessSecurityGroups":{ + "shape":"ServiceAccessSecurityGroups", + "documentation":"

    The security group that controls communication between the Amazon EVS control plane and VPC. The default security group is used if a custom security group isn't specified.

    The security group should allow access to the following.

    • TCP/UDP access to the DNS servers

    • HTTPS/SSH access to the host management VLAN subnet

    • HTTPS/SSH access to the Management VM VLAN subnet

    You should avoid modifying the security group rules after deployment, as this can break the persistent connection between the Amazon EVS control plane and VPC. This can cause future environment actions like adding or removing hosts to fail.

    " + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

    A unique ID for the VPC that connects to the environment control plane for service access.

    Amazon EVS requires that all VPC subnets exist in a single Availability Zone in a Region where the service is available.

    The VPC that you select must have a valid DHCP option set with domain name, at least two DNS servers, and an NTP server. These settings are used to configure your VCF appliances and hosts.

    If you plan to use HCX over the internet, choose a VPC that has a primary CIDR block and a /28 secondary CIDR block from an IPAM pool. Make sure that your VPC also has an attached internet gateway.

    Amazon EVS does not support the following Amazon Web Services networking options for NSX overlay connectivity: cross-Region VPC peering, Amazon S3 gateway endpoints, or Amazon Web Services Direct Connect virtual private gateway associations.

    " + }, + "serviceAccessSubnetId":{ + "shape":"SubnetId", + "documentation":"

    The subnet that is used to establish connectivity between the Amazon EVS control plane and VPC. Amazon EVS uses this subnet to validate mandatory DNS records for your VCF appliances and hosts and create the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version to use for the environment. Amazon EVS only supports VCF version 5.2.1 at this time.

    " + }, + "termsAccepted":{ + "shape":"Boolean", + "documentation":"

    Customer confirmation that the customer has purchased and maintains sufficient VCF software licenses to cover all physical processor cores in the environment, in compliance with VMware's licensing requirements and terms of use.

    " + }, + "licenseInfo":{ + "shape":"LicenseInfoList", + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key. VCF licenses must have sufficient core entitlements to cover vCPU core and vSAN storage capacity needs.

    VCF licenses can be used for only one Amazon EVS environment. Amazon EVS does not support reuse of VCF licenses for multiple environments.

    VCF license information can be retrieved from the Broadcom portal.

    " + }, + "initialVlans":{ + "shape":"InitialVlans", + "documentation":"

    The initial VLAN subnets for the environment. You must specify a non-overlapping CIDR block for each VLAN subnet.

    " + }, + "hosts":{ + "shape":"HostInfoForCreateList", + "documentation":"

    The ESXi hosts to add to the environment. Amazon EVS requires that you provide details for a minimum of 4 hosts during environment creation.

    For each host, you must provide the desired hostname, EC2 SSH key, and EC2 instance type. Optionally, you can also provide a partition or cluster placement group to use, or use Amazon EC2 Dedicated Hosts.

    " + }, + "connectivityInfo":{ + "shape":"ConnectivityInfo", + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX edges over the NSX, providing BGP dynamic routing for overlay networks.

    " + }, + "vcfHostnames":{ + "shape":"VcfHostnames", + "documentation":"

    The DNS hostnames for the virtual machines that host the VCF management appliances. Amazon EVS requires that you provide DNS hostnames for the following appliances: vCenter, NSX Manager, SDDC Manager, and Cloud Builder.

    " + }, + "siteId":{ + "shape":"String", + "documentation":"

    The Broadcom Site ID that is allocated to you as part of your electronic software delivery. This ID allows customer access to the Broadcom portal, and is provided to you by Broadcom at the close of your software contract or contract renewal. Amazon EVS uses the Broadcom Site ID that you provide to meet Broadcom VCF license usage reporting requirements for Amazon EVS.

    " + } + } + }, + "CreateEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the created environment.

    " + } + } + }, + "DedicatedHostId":{ + "type":"string", + "max":25, + "min":1, + "pattern":"h-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "DeleteEnvironmentHostRequest":{ + "type":"structure", + "required":[ + "environmentId", + "hostName" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the host deletion request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the host's environment.

    " + }, + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname associated with the host to be deleted.

    " + } + } + }, + "DeleteEnvironmentHostResponse":{ + "type":"structure", + "members":{ + "environmentSummary":{ + "shape":"EnvironmentSummary", + "documentation":"

    A summary of the environment that the host was deleted from.

    " + }, + "host":{ + "shape":"Host", + "documentation":"

    A description of the deleted host.

    " + } + } + }, + "DeleteEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

    This parameter is not used in Amazon EVS currently. If you supply input for this parameter, it will have no effect.

    A unique, case-sensitive identifier that you provide to ensure the idempotency of the environment deletion request. If you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.

    ", + "idempotencyToken":true + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID associated with the environment to be deleted.

    " + } + } + }, + "DeleteEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the deleted environment.

    " + } + } + }, + "Environment":{ + "type":"structure", + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    The unique ID for the environment.

    " + }, + "environmentState":{ + "shape":"EnvironmentState", + "documentation":"

    The state of an environment.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    A detailed description of the environmentState of an environment.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was modified.

    " + }, + "environmentArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that is associated with the environment.

    " + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment.

    " + }, + "vpcId":{ + "shape":"VpcId", + "documentation":"

    The VPC associated with the environment.

    " + }, + "serviceAccessSubnetId":{ + "shape":"SubnetId", + "documentation":"

    The subnet that is used to establish connectivity between the Amazon EVS control plane and VPC. Amazon EVS uses this subnet to perform validations and create the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version of the environment.

    " + }, + "termsAccepted":{ + "shape":"Boolean", + "documentation":"

    Customer confirmation that the customer has purchased and maintains sufficient VCF software licenses to cover all physical processor cores in the environment, in compliance with VMware's licensing requirements and terms of use.

    " + }, + "licenseInfo":{ + "shape":"LicenseInfoList", + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key.

    " + }, + "siteId":{ + "shape":"String", + "documentation":"

    The Broadcom Site ID that is associated with your Amazon EVS environment. Amazon EVS uses the Broadcom Site ID that you provide to meet Broadcom VCF license usage reporting requirements for Amazon EVS.

    " + }, + "environmentStatus":{ + "shape":"CheckResult", + "documentation":"

    Reports impaired functionality that stems from issues internal to the environment, such as impaired reachability.

    " + }, + "checks":{ + "shape":"ChecksList", + "documentation":"

    A check on the environment to identify instance health and VMware VCF licensing issues.

    " + }, + "connectivityInfo":{ + "shape":"ConnectivityInfo", + "documentation":"

    The connectivity configuration for the environment. Amazon EVS requires that you specify two route server peer IDs. During environment creation, the route server endpoints peer with the NSX uplink VLAN for connectivity to the NSX overlay network.

    " + }, + "vcfHostnames":{ + "shape":"VcfHostnames", + "documentation":"

    The DNS hostnames to be used by the VCF management appliances in your environment.

    For environment creation to be successful, each hostname entry must resolve to a domain name that you've registered in your DNS service of choice and configured in the DHCP option set of your VPC. DNS hostnames cannot be changed after environment creation has started.

    " + }, + "kmsKeyId":{ + "shape":"String", + "documentation":"

    The Amazon Web Services KMS key ID that Amazon Web Services Secrets Manager uses to encrypt secrets that are associated with the environment. These secrets contain the VCF credentials that are needed to install vCenter Server, NSX, and SDDC Manager.

    By default, Amazon EVS uses the Amazon Web Services Secrets Manager managed key aws/secretsmanager. You can also specify a customer managed key.

    " + }, + "serviceAccessSecurityGroups":{ + "shape":"ServiceAccessSecurityGroups", + "documentation":"

    The security groups that allow traffic between the Amazon EVS control plane and your VPC for service access. If a security group is not specified, Amazon EVS uses the default security group in your account for service access.

    " + }, + "credentials":{ + "shape":"SecretList", + "documentation":"

    The VCF credentials that are stored as Amazon EVS managed secrets in Amazon Web Services Secrets Manager.

    Amazon EVS stores credentials that are needed to install vCenter Server, NSX, and SDDC Manager.

    " + } + }, + "documentation":"

    An object that represents an Amazon EVS environment.

    " + }, + "EnvironmentId":{ + "type":"string", + "pattern":"(env-[a-zA-Z0-9]{10})" + }, + "EnvironmentName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "EnvironmentState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "DELETING", + "DELETED", + "CREATE_FAILED" + ] + }, + "EnvironmentStateList":{ + "type":"list", + "member":{"shape":"EnvironmentState"} + }, + "EnvironmentSummary":{ + "type":"structure", + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + }, + "environmentName":{ + "shape":"EnvironmentName", + "documentation":"

    The name of the environment.

    " + }, + "vcfVersion":{ + "shape":"VcfVersion", + "documentation":"

    The VCF version of the environment.

    " + }, + "environmentStatus":{ + "shape":"CheckResult", + "documentation":"

    Reports impaired functionality that stems from issues internal to the environment, such as impaired reachability.

    " + }, + "environmentState":{ + "shape":"EnvironmentState", + "documentation":"

    The state of an environment.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the environment was modified.

    " + }, + "environmentArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that is associated with the environment.

    " + } + }, + "documentation":"

    A list of environments with summarized environment details.

    " + }, + "EnvironmentSummaryList":{ + "type":"list", + "member":{"shape":"EnvironmentSummary"} + }, + "GetEnvironmentRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "GetEnvironmentResponse":{ + "type":"structure", + "members":{ + "environment":{ + "shape":"Environment", + "documentation":"

    A description of the requested environment.

    " + } + } + }, + "Host":{ + "type":"structure", + "members":{ + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname of the host. DNS hostnames for hosts must be unique across Amazon EVS environments and within VCF.

    " + }, + "ipAddress":{ + "shape":"IpAddress", + "documentation":"

    The IP address of the host.

    " + }, + "keyName":{ + "shape":"KeyName", + "documentation":"

    The name of the SSH key that is used to access the host.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The EC2 instance type of the host.

    EC2 instances created through Amazon EVS do not support associating an IAM instance profile.

    " + }, + "placementGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

    The unique ID of the placement group where the host is placed.

    " + }, + "dedicatedHostId":{ + "shape":"DedicatedHostId", + "documentation":"

    The unique ID of the Amazon EC2 Dedicated Host.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the host was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the host was modified.

    " + }, + "hostState":{ + "shape":"HostState", + "documentation":"

    The state of the host.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    A detailed description of the hostState of a host.

    " + }, + "ec2InstanceId":{ + "shape":"String", + "documentation":"

    The unique ID of the EC2 instance that represents the host.

    " + }, + "networkInterfaces":{ + "shape":"NetworkInterfaceList", + "documentation":"

    The elastic network interfaces that are attached to the host.

    " + } + }, + "documentation":"

    An ESXi host that runs on an Amazon EC2 bare metal instance. Four hosts are created in an Amazon EVS environment during environment creation. You can add hosts to an environment using the CreateEnvironmentHost operation. Amazon EVS supports 4-16 hosts per environment.

    " + }, + "HostInfoForCreate":{ + "type":"structure", + "required":[ + "hostName", + "keyName", + "instanceType" + ], + "members":{ + "hostName":{ + "shape":"HostName", + "documentation":"

    The DNS hostname of the host. DNS hostnames for hosts must be unique across Amazon EVS environments and within VCF.

    " + }, + "keyName":{ + "shape":"KeyName", + "documentation":"

    The name of the SSH key that is used to access the host.

    " + }, + "instanceType":{ + "shape":"InstanceType", + "documentation":"

    The EC2 instance type that represents the host.

    " + }, + "placementGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

    The unique ID of the placement group where the host is placed.

    " + }, + "dedicatedHostId":{ + "shape":"DedicatedHostId", + "documentation":"

    The unique ID of the Amazon EC2 Dedicated Host.

    " + } + }, + "documentation":"

    An object that represents a host.

    You cannot use dedicatedHostId and placementGroupId together in the same HostInfoForCreate object. This results in a ValidationException response.

    " + }, + "HostInfoForCreateList":{ + "type":"list", + "member":{"shape":"HostInfoForCreate"}, + "max":4, + "min":4 + }, + "HostList":{ + "type":"list", + "member":{"shape":"Host"} + }, + "HostName":{ + "type":"string", + "pattern":"([a-zA-Z0-9\\-]*)" + }, + "HostState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "UPDATING", + "DELETING", + "DELETED", + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "InitialVlanInfo":{ + "type":"structure", + "required":["cidr"], + "members":{ + "cidr":{ + "shape":"Cidr", + "documentation":"

    The CIDR block that you provide to create a VLAN subnet. VLAN CIDR blocks must not overlap with other subnets in the VPC.

    " + } + }, + "documentation":"

    An object that represents an initial VLAN subnet for the environment. Amazon EVS creates initial VLAN subnets when you first create the environment. You must specify a non-overlapping CIDR block for each VLAN subnet. Amazon EVS creates the following 10 VLAN subnets: host management VLAN, vMotion VLAN, vSAN VLAN, VTEP VLAN, Edge VTEP VLAN, Management VM VLAN, HCX uplink VLAN, NSX uplink VLAN, expansion VLAN 1, expansion VLAN 2.

    " + }, + "InitialVlans":{ + "type":"structure", + "required":[ + "vmkManagement", + "vmManagement", + "vMotion", + "vSan", + "vTep", + "edgeVTep", + "nsxUplink", + "hcx", + "expansionVlan1", + "expansionVlan2" + ], + "members":{ + "vmkManagement":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VMkernel management VLAN subnet. This VLAN subnet carries traffic for managing ESXi hosts and communicating with VMware vCenter Server.

    " + }, + "vmManagement":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VM management VLAN subnet. This VLAN subnet carries traffic for vSphere virtual machines.

    " + }, + "vMotion":{ + "shape":"InitialVlanInfo", + "documentation":"

    The vMotion VLAN subnet. This VLAN subnet carries traffic for vSphere vMotion.

    " + }, + "vSan":{ + "shape":"InitialVlanInfo", + "documentation":"

    The vSAN VLAN subnet. This VLAN subnet carries the communication between ESXi hosts to implement a vSAN shared storage pool.

    " + }, + "vTep":{ + "shape":"InitialVlanInfo", + "documentation":"

    The VTEP VLAN subnet. This VLAN subnet handles internal network traffic between virtual machines within a VCF instance.

    " + }, + "edgeVTep":{ + "shape":"InitialVlanInfo", + "documentation":"

    The edge VTEP VLAN subnet. This VLAN subnet manages traffic flowing between the internal network and external networks, including internet access and other site connections.

    " + }, + "nsxUplink":{ + "shape":"InitialVlanInfo", + "documentation":"

    The NSX uplink VLAN subnet. This VLAN subnet allows connectivity to the NSX overlay network.

    " + }, + "hcx":{ + "shape":"InitialVlanInfo", + "documentation":"

    The HCX VLAN subnet. This VLAN subnet allows the HCX Interconnect (IX) and HCX Network Extension (NE) to reach their peers and enable HCX Service Mesh creation.

    " + }, + "expansionVlan1":{ + "shape":"InitialVlanInfo", + "documentation":"

    An additional VLAN subnet that can be used to extend VCF capabilities once configured. For example, you can configure an expansion VLAN subnet to use NSX Federation for centralized management and synchronization of multiple NSX deployments across different locations.

    " + }, + "expansionVlan2":{ + "shape":"InitialVlanInfo", + "documentation":"

    An additional VLAN subnet that can be used to extend VCF capabilities once configured. For example, you can configure an expansion VLAN subnet to use NSX Federation for centralized management and synchronization of multiple NSX deployments across different locations.

    " + } + }, + "documentation":"

    The initial VLAN subnets for the environment. You must specify a non-overlapping CIDR block for each VLAN subnet.

    " + }, + "InstanceType":{ + "type":"string", + "enum":["i4i.metal"] + }, + "Integer":{ + "type":"integer", + "box":true + }, + "IpAddress":{ + "type":"string", + "pattern":"(\\d{1,3}\\.){3}\\d{1,3}" + }, + "KeyName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "LicenseInfo":{ + "type":"structure", + "required":[ + "solutionKey", + "vsanKey" + ], + "members":{ + "solutionKey":{ + "shape":"SolutionKey", + "documentation":"

    The VCF solution key. This license unlocks VMware VCF product features, including vSphere, NSX, SDDC Manager, and vCenter Server.

    " + }, + "vsanKey":{ + "shape":"VSanLicenseKey", + "documentation":"

    The VSAN license key. This license unlocks vSAN features.

    " + } + }, + "documentation":"

    The license information that Amazon EVS requires to create an environment. Amazon EVS requires two license keys: a VCF solution key and a vSAN license key.

    " + }, + "LicenseInfoList":{ + "type":"list", + "member":{"shape":"LicenseInfo"}, + "max":1, + "min":1 + }, + "ListEnvironmentHostsRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "ListEnvironmentHostsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for next page results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentHosts":{ + "shape":"HostList", + "documentation":"

    A list of hosts in the environment.

    " + } + } + }, + "ListEnvironmentVlansRequest":{ + "type":"structure", + "required":["environmentId"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "environmentId":{ + "shape":"EnvironmentId", + "documentation":"

    A unique ID for the environment.

    " + } + } + }, + "ListEnvironmentVlansResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for next page results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentVlans":{ + "shape":"VlanList", + "documentation":"

    A list of VLANs that are associated with the specified environment.

    " + } + } + }, + "ListEnvironmentsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for each page. If nextToken is returned, there are more results available. Make the call again using the returned token with all other arguments unchanged to retrieve the next page. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

    " + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

    The maximum number of results to return. If you specify MaxResults in the request, the response includes information up to the limit specified.

    " + }, + "state":{ + "shape":"EnvironmentStateList", + "documentation":"

    The state of an environment. Used to filter response results to return only environments with the specified environmentState.

    " + } + } + }, + "ListEnvironmentsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

    A unique pagination token for next page results. Make the call again using this token to retrieve the next page.

    " + }, + "environmentSummaries":{ + "shape":"EnvironmentSummaryList", + "documentation":"

    A list of environments with summarized environment details.

    " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) that identifies the resource to list tags for.

    " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"ResponseTagMap", + "documentation":"

    The tags for the resource.

    " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "NetworkInterface":{ + "type":"structure", + "members":{ + "networkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

    The unique ID of the elastic network interface.

    " + } + }, + "documentation":"

    An elastic network interface (ENI) that connects hosts to the VLAN subnets. Amazon EVS provisions two identically configured ENIs in the VMkernel management subnet during host creation. One ENI is active, and the other is in standby mode for automatic switchover during a failure scenario.

    " + }, + "NetworkInterfaceId":{ + "type":"string", + "max":100, + "min":1 + }, + "NetworkInterfaceList":{ + "type":"list", + "member":{"shape":"NetworkInterface"}, + "max":2, + "min":0 + }, + "PaginationToken":{"type":"string"}, + "PlacementGroupId":{ + "type":"string", + "max":25, + "min":1, + "pattern":"pg-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "RequestTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "resourceId":{ + "shape":"String", + "documentation":"

    The ID of the resource that could not be found.

    " + }, + "resourceType":{ + "shape":"String", + "documentation":"

    The type of the resource that is associated with the error.

    " + } + }, + "documentation":"

    A service resource associated with the request could not be found. The resource might not be specified correctly, or it may have a state of DELETED.

    ", + "exception":true + }, + "ResponseTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "RouteServerPeering":{ + "type":"string", + "max":21, + "min":3 + }, + "RouteServerPeeringList":{ + "type":"list", + "member":{"shape":"RouteServerPeering"}, + "max":2, + "min":2 + }, + "Secret":{ + "type":"structure", + "members":{ + "secretArn":{ + "shape":"String", + "documentation":"

    The Amazon Resource Name (ARN) of the secret.

    " + } + }, + "documentation":"

    A managed secret that contains the credentials for installing vCenter Server, NSX, and SDDC Manager. During environment creation, the Amazon EVS control plane uses Amazon Web Services Secrets Manager to create, encrypt, validate, and store secrets. If you choose to delete your environment, Amazon EVS also deletes the secrets that are associated with your environment. Amazon EVS does not provide managed rotation of secrets. We recommend that you rotate secrets regularly to ensure that secrets are not long-lived.

    " + }, + "SecretList":{ + "type":"list", + "member":{"shape":"Secret"} + }, + "SecurityGroupId":{ + "type":"string", + "max":25, + "min":3, + "pattern":"sg-[0-9a-zA-Z]*" + }, + "SecurityGroups":{ + "type":"list", + "member":{"shape":"SecurityGroupId"}, + "max":2, + "min":0 + }, + "ServiceAccessSecurityGroups":{ + "type":"structure", + "members":{ + "securityGroups":{ + "shape":"SecurityGroups", + "documentation":"

    The security groups that allow service access.

    " + } + }, + "documentation":"

    The security groups that allow traffic between the Amazon EVS control plane and your VPC for Amazon EVS service access. If a security group is not specified, Amazon EVS uses the default security group in your account for service access.

    " + }, + "SolutionKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}" + }, + "StateDetails":{"type":"string"}, + "String":{"type":"string"}, + "SubnetId":{ + "type":"string", + "max":24, + "min":15, + "pattern":"subnet-[a-f0-9]{8}([a-f0-9]{9})?" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\w.:/=+-@]+" + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagPolicyException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + } + }, + "documentation":"

    The request doesn't comply with IAM tag policy. Correct your request and then retry it.

    ", + "exception":true + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to add tags to.

    " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

    Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other environment or Amazon Web Services resources.

    " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\w.:/=+-@]+|" + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

    The seconds to wait to retry.

    " + } + }, + "documentation":"

    The CreateEnvironmentHost operation couldn't be performed because the service is throttling requests. This exception is thrown when the CreateEnvironmentHost request exceeds concurrency of 1 transaction per second (TPS).

    ", + "exception":true, + "retryable":{"throttling":false} + }, + "Timestamp":{"type":"timestamp"}, + "TooManyTagsException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + } + }, + "documentation":"

    A service resource associated with the request has more than 200 tags.

    ", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

    The Amazon Resource Name (ARN) of the resource to delete tags from.

    " + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

    The keys of the tags to delete.

    " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "VSanLicenseKey":{ + "type":"string", + "pattern":"[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}-[a-zA-Z0-9]{5}" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

    Describes the error encountered.

    " + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

    The reason for the exception.

    " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

    A list of fields that didn't validate.

    " + } + }, + "documentation":"

    The input fails to satisfy the specified constraints. You will see this exception if invalid inputs are provided for any of the Amazon EVS environment operations, or if a list operation is performed on an environment resource that is still initializing.

    ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

    The field name.

    " + }, + "message":{ + "shape":"String", + "documentation":"

    A message describing why the field failed validation.

    " + } + }, + "documentation":"

    Stores information about a field passed inside a request that resulted in an exception.

    " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "VcfHostnames":{ + "type":"structure", + "required":[ + "vCenter", + "nsx", + "nsxManager1", + "nsxManager2", + "nsxManager3", + "nsxEdge1", + "nsxEdge2", + "sddcManager", + "cloudBuilder" + ], + "members":{ + "vCenter":{ + "shape":"HostName", + "documentation":"

    The VMware vCenter hostname.

    " + }, + "nsx":{ + "shape":"HostName", + "documentation":"

    The VMware NSX hostname.

    " + }, + "nsxManager1":{ + "shape":"HostName", + "documentation":"

    The hostname for the first VMware NSX Manager virtual machine (VM).

    " + }, + "nsxManager2":{ + "shape":"HostName", + "documentation":"

    The hostname for the second VMware NSX Manager virtual machine (VM).

    " + }, + "nsxManager3":{ + "shape":"HostName", + "documentation":"

    The hostname for the third VMware NSX Manager virtual machine (VM).

    " + }, + "nsxEdge1":{ + "shape":"HostName", + "documentation":"

    The hostname for the first NSX Edge node.

    " + }, + "nsxEdge2":{ + "shape":"HostName", + "documentation":"

    The hostname for the second NSX Edge node.

    " + }, + "sddcManager":{ + "shape":"HostName", + "documentation":"

    The hostname for SDDC Manager.

    " + }, + "cloudBuilder":{ + "shape":"HostName", + "documentation":"

    The hostname for VMware Cloud Builder.

    " + } + }, + "documentation":"

    The DNS hostnames that Amazon EVS uses to install VMware vCenter Server, NSX, SDDC Manager, and Cloud Builder. Each hostname must be unique, and resolve to a domain name that you've registered in your DNS service of choice. Hostnames cannot be changed.

    VMware VCF requires the deployment of two NSX Edge nodes, and three NSX Manager virtual machines.

    " + }, + "VcfVersion":{ + "type":"string", + "enum":["VCF-5.2.1"] + }, + "Vlan":{ + "type":"structure", + "members":{ + "vlanId":{ + "shape":"VlanId", + "documentation":"

    The unique ID of the VLAN.

    " + }, + "cidr":{ + "shape":"Cidr", + "documentation":"

    The CIDR block of the VLAN.

    " + }, + "availabilityZone":{ + "shape":"String", + "documentation":"

    The availability zone of the VLAN.

    " + }, + "functionName":{ + "shape":"String", + "documentation":"

    The VMware VCF traffic type that is carried over the VLAN. For example, a VLAN with a functionName of hcx is being used to carry VMware HCX traffic.

    " + }, + "subnetId":{ + "shape":"SubnetId", + "documentation":"

    The unique ID of the VLAN subnet.

    " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the VLAN was created.

    " + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

    The date and time that the VLAN was modified.

    " + }, + "vlanState":{ + "shape":"VlanState", + "documentation":"

    The state of the VLAN.

    " + }, + "stateDetails":{ + "shape":"StateDetails", + "documentation":"

    The state details of the VLAN.

    " + } + }, + "documentation":"

    The VLANs that Amazon EVS creates during environment creation.

    " + }, + "VlanId":{ + "type":"integer", + "box":true + }, + "VlanList":{ + "type":"list", + "member":{"shape":"Vlan"} + }, + "VlanState":{ + "type":"string", + "enum":[ + "CREATING", + "CREATED", + "DELETING", + "DELETED", + "CREATE_FAILED" + ] + }, + "VpcId":{ + "type":"string", + "max":21, + "min":12, + "pattern":"vpc-[a-f0-9]{8}([a-f0-9]{9})?" + } + }, + "documentation":"

    Amazon Elastic VMware Service (Amazon EVS) is a service that you can use to deploy a VMware Cloud Foundation (VCF) software environment directly on EC2 bare metal instances within an Amazon Virtual Private Cloud (VPC).

    Workloads running on Amazon EVS are fully compatible with workloads running on any standard VMware vSphere environment. This means that you can migrate any VMware-based workload to Amazon EVS without workload modification.

    " +} diff --git a/services/evs/src/main/resources/codegen-resources/waiters-2.json b/services/evs/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/evs/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/finspace/pom.xml b/services/finspace/pom.xml index 5c399061689a..8556712e46bc 100644 --- a/services/finspace/pom.xml +++ b/services/finspace/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT finspace AWS Java SDK :: Services :: Finspace diff --git a/services/finspace/src/main/resources/codegen-resources/customization.config b/services/finspace/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/finspace/src/main/resources/codegen-resources/customization.config +++ b/services/finspace/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/finspacedata/pom.xml b/services/finspacedata/pom.xml index 3ce4d59cfe9e..6c1dc5c0f669 100644 --- a/services/finspacedata/pom.xml +++ b/services/finspacedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT finspacedata AWS Java SDK :: Services :: Finspace Data diff --git a/services/finspacedata/src/main/resources/codegen-resources/customization.config b/services/finspacedata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/finspacedata/src/main/resources/codegen-resources/customization.config +++ b/services/finspacedata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/firehose/pom.xml b/services/firehose/pom.xml index 8a284a9949ef..7d78b63f86ac 100644 --- a/services/firehose/pom.xml +++ b/services/firehose/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT firehose AWS Java SDK :: Services :: Amazon Kinesis Firehose diff --git a/services/firehose/src/main/resources/codegen-resources/customization.config b/services/firehose/src/main/resources/codegen-resources/customization.config index 9ec45ee9c014..85aba7197c80 100644 --- a/services/firehose/src/main/resources/codegen-resources/customization.config +++ b/services/firehose/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listDeliveryStreams" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/firehose/src/main/resources/codegen-resources/paginators-1.json b/services/firehose/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/firehose/src/main/resources/codegen-resources/paginators-1.json +++ b/services/firehose/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/firehose/src/main/resources/codegen-resources/service-2.json b/services/firehose/src/main/resources/codegen-resources/service-2.json index 7b9450326269..d58ebf68f6ac 100644 --- a/services/firehose/src/main/resources/codegen-resources/service-2.json +++ b/services/firehose/src/main/resources/codegen-resources/service-2.json @@ -1160,8 +1160,7 @@ }, "DeleteDeliveryStreamOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeliveryStartTimestamp":{"type":"timestamp"}, "DeliveryStreamARN":{ @@ -2048,7 +2047,7 @@ "type":"string", "max":512, "min":1, - "pattern":"arn:.*:glue:.*:\\d{12}:catalog" + 
"pattern":"arn:.*:glue:.*:\\d{12}:catalog(?:(/[a-z0-9_-]+){1,2})?" }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", @@ -3589,7 +3588,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:.*:secretsmanager:[a-zA-Z0-9\\-]+:\\d{12}:secret:[a-zA-Z0-9\\-/_+=.@]+" + "pattern":"arn:.*:secretsmanager:[a-zA-Z0-9\\-]+:\\d{12}:secret:[a-zA-Z0-9\\-/_+=.@!]+" }, "SecretsManagerConfiguration":{ "type":"structure", @@ -4276,8 +4275,7 @@ }, "StartDeliveryStreamEncryptionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "StopDeliveryStreamEncryptionInput":{ "type":"structure", @@ -4291,8 +4289,7 @@ }, "StopDeliveryStreamEncryptionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "StringWithLettersDigitsUnderscoresDots":{ "type":"string", @@ -4357,8 +4354,7 @@ }, "TagDeliveryStreamOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "TagKey":{ "type":"string", @@ -4409,8 +4405,7 @@ }, "UntagDeliveryStreamOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDestinationInput":{ "type":"structure", @@ -4477,8 +4472,7 @@ }, "UpdateDestinationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "Username":{ "type":"string", diff --git a/services/fis/pom.xml b/services/fis/pom.xml index ffe1339e4ccc..7f3c790e3ba6 100644 --- a/services/fis/pom.xml +++ b/services/fis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT fis AWS Java SDK :: Services :: Fis diff --git a/services/fis/src/main/resources/codegen-resources/customization.config b/services/fis/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/fis/src/main/resources/codegen-resources/customization.config +++ b/services/fis/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git 
a/services/fms/pom.xml b/services/fms/pom.xml index c19b07914fd7..ac64b652ae70 100644 --- a/services/fms/pom.xml +++ b/services/fms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT fms AWS Java SDK :: Services :: FMS diff --git a/services/fms/src/main/resources/codegen-resources/customization.config b/services/fms/src/main/resources/codegen-resources/customization.config index 7ddce5a9e917..b394eea2b025 100644 --- a/services/fms/src/main/resources/codegen-resources/customization.config +++ b/services/fms/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "listMemberAccounts", "listPolicies" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/forecast/pom.xml b/services/forecast/pom.xml index d15b472b7db0..348cd4a4f79b 100644 --- a/services/forecast/pom.xml +++ b/services/forecast/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT forecast AWS Java SDK :: Services :: Forecast diff --git a/services/forecast/src/main/resources/codegen-resources/customization.config b/services/forecast/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/forecast/src/main/resources/codegen-resources/customization.config +++ b/services/forecast/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/forecastquery/pom.xml b/services/forecastquery/pom.xml index 561cade3a253..e28dfc85385c 100644 --- a/services/forecastquery/pom.xml +++ b/services/forecastquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT forecastquery AWS Java SDK :: Services :: Forecastquery diff --git a/services/frauddetector/pom.xml 
b/services/frauddetector/pom.xml index 9cf56d7a567d..b2d0a6760c24 100644 --- a/services/frauddetector/pom.xml +++ b/services/frauddetector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT frauddetector AWS Java SDK :: Services :: FraudDetector diff --git a/services/frauddetector/src/main/resources/codegen-resources/customization.config b/services/frauddetector/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/frauddetector/src/main/resources/codegen-resources/customization.config +++ b/services/frauddetector/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/freetier/pom.xml b/services/freetier/pom.xml index e42c2fca7a66..9b8c28154641 100644 --- a/services/freetier/pom.xml +++ b/services/freetier/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT freetier AWS Java SDK :: Services :: Free Tier diff --git a/services/freetier/src/main/resources/codegen-resources/customization.config b/services/freetier/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/freetier/src/main/resources/codegen-resources/customization.config +++ b/services/freetier/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/fsx/pom.xml b/services/fsx/pom.xml index 948d66d998e8..5ac7549fefd3 100644 --- a/services/fsx/pom.xml +++ b/services/fsx/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT fsx AWS Java SDK :: Services :: FSx diff --git 
a/services/fsx/src/main/resources/codegen-resources/customization.config b/services/fsx/src/main/resources/codegen-resources/customization.config index b4b1ff8c6b83..43360d6a47fa 100644 --- a/services/fsx/src/main/resources/codegen-resources/customization.config +++ b/services/fsx/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "describeBackups", "describeFileSystems" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/fsx/src/main/resources/codegen-resources/paginators-1.json b/services/fsx/src/main/resources/codegen-resources/paginators-1.json index b1aad5081996..ae6e98501012 100644 --- a/services/fsx/src/main/resources/codegen-resources/paginators-1.json +++ b/services/fsx/src/main/resources/codegen-resources/paginators-1.json @@ -30,10 +30,17 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "DescribeS3AccessPointAttachments": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "S3AccessPointAttachments" + }, "DescribeSnapshots": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Snapshots" }, "DescribeStorageVirtualMachines": { "input_token": "NextToken", diff --git a/services/fsx/src/main/resources/codegen-resources/service-2.json b/services/fsx/src/main/resources/codegen-resources/service-2.json index 911d72794566..62ff82fc33c0 100644 --- a/services/fsx/src/main/resources/codegen-resources/service-2.json +++ b/services/fsx/src/main/resources/codegen-resources/service-2.json @@ -89,6 +89,27 @@ "documentation":"

    Updates an existing volume by using a snapshot from another Amazon FSx for OpenZFS file system. For more information, see on-demand data replication in the Amazon FSx for OpenZFS User Guide.

    ", "idempotent":true }, + "CreateAndAttachS3AccessPoint":{ + "name":"CreateAndAttachS3AccessPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateAndAttachS3AccessPointRequest"}, + "output":{"shape":"CreateAndAttachS3AccessPointResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperation"}, + {"shape":"VolumeNotFound"}, + {"shape":"InvalidAccessPoint"}, + {"shape":"InvalidRequest"}, + {"shape":"AccessPointAlreadyOwnedByYou"}, + {"shape":"TooManyAccessPoints"} + ], + "documentation":"

    Creates an S3 access point and attaches it to an Amazon FSx volume. For FSx for OpenZFS file systems, the volume must be hosted on a high-availability file system, either Single-AZ or Multi-AZ. For more information, see Accessing your data using access points in the Amazon FSx for OpenZFS User Guide.

    The requester requires the following permissions to perform these actions:

    • fsx:CreateAndAttachS3AccessPoint

    • s3:CreateAccessPoint

    • s3:GetAccessPoint

    • s3:PutAccessPointPolicy

    • s3:DeleteAccessPoint

    The following actions are related to CreateAndAttachS3AccessPoint:

    " + }, "CreateBackup":{ "name":"CreateBackup", "http":{ @@ -359,7 +380,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.

    ", + "documentation":"

    Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted.

    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation.

    Before deleting an Amazon FSx for OpenZFS file system, make sure that there aren't any Amazon S3 access points attached to any volume. For more information on how to list S3 access points that are attached to volumes, see Listing S3 access point attachments. For more information on how to delete S3 access points, see Deleting an S3 access point attachment.

    By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted.

    To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task.

    The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error.

    If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request).

    The data in a deleted file system is also deleted and can't be recovered by any means.

    ", "idempotent":true }, "DeleteSnapshot":{ @@ -508,6 +529,22 @@ ], "documentation":"

    Returns the description of specific Amazon FSx file systems, if a FileSystemIds value is provided for that file system. Otherwise, it returns descriptions of all file systems owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

    When retrieving all file system descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more file system descriptions remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

    This operation is used in an iterative process to retrieve a list of your file system descriptions. DescribeFileSystems is called first without a NextTokenvalue. Then the operation continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

    When using this operation, keep the following in mind:

    • The implementation might return fewer than MaxResults file system descriptions while still including a NextToken value.

    • The order of file systems returned in the response of one DescribeFileSystems call and the order of file systems returned across the responses of a multicall iteration is unspecified.

    " }, + "DescribeS3AccessPointAttachments":{ + "name":"DescribeS3AccessPointAttachments", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeS3AccessPointAttachmentsRequest"}, + "output":{"shape":"DescribeS3AccessPointAttachmentsResponse"}, + "errors":[ + {"shape":"S3AccessPointAttachmentNotFound"}, + {"shape":"BadRequest"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperation"} + ], + "documentation":"

    Describes one or more S3 access points attached to Amazon FSx volumes.

    The requester requires the following permission to perform this action:

    • fsx:DescribeS3AccessPointAttachments

    " + }, "DescribeSharedVpcConfiguration":{ "name":"DescribeSharedVpcConfiguration", "http":{ @@ -567,6 +604,23 @@ ], "documentation":"

    Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volumes.

    " }, + "DetachAndDeleteS3AccessPoint":{ + "name":"DetachAndDeleteS3AccessPoint", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetachAndDeleteS3AccessPointRequest"}, + "output":{"shape":"DetachAndDeleteS3AccessPointResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperation"}, + {"shape":"S3AccessPointAttachmentNotFound"} + ], + "documentation":"

    Detaches an S3 access point from an Amazon FSx volume and deletes the S3 access point.

    The requester requires the following permission to perform this action:

    • fsx:DetachAndDeleteS3AccessPoint

    • s3:DeleteAccessPoint

    " + }, "DisassociateFileSystemAliases":{ "name":"DisassociateFileSystemAliases", "http":{ @@ -740,7 +794,7 @@ {"shape":"MissingFileSystemConfiguration"}, {"shape":"ServiceLimitExceeded"} ], - "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • DiskIopsConfiguration

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • FileSystemTypeVersion

    • LogConfiguration

    • LustreRootSquashConfiguration

    • MetadataConfiguration

    • PerUnitStorageThroughput

    • StorageCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • HAPairs

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • ThroughputCapacityPerHAPair

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • ReadCacheConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " + "documentation":"

    Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.

    For FSx for Windows File Server file systems, you can update the following properties:

    • AuditLogConfiguration

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • SelfManagedActiveDirectoryConfiguration

    • StorageCapacity

    • StorageType

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for Lustre file systems, you can update the following properties:

    • AutoImportPolicy

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DataCompressionType

    • FileSystemTypeVersion

    • LogConfiguration

    • LustreReadCacheConfiguration

    • LustreRootSquashConfiguration

    • MetadataConfiguration

    • PerUnitStorageThroughput

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    For FSx for ONTAP file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • FsxAdminPassword

    • HAPairs

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • ThroughputCapacityPerHAPair

    • WeeklyMaintenanceStartTime

    For FSx for OpenZFS file systems, you can update the following properties:

    • AddRouteTableIds

    • AutomaticBackupRetentionDays

    • CopyTagsToBackups

    • CopyTagsToVolumes

    • DailyAutomaticBackupStartTime

    • DiskIopsConfiguration

    • ReadCacheConfiguration

    • RemoveRouteTableIds

    • StorageCapacity

    • ThroughputCapacity

    • WeeklyMaintenanceStartTime

    " }, "UpdateSharedVpcConfiguration":{ "name":"UpdateSharedVpcConfiguration", @@ -816,6 +870,23 @@ "min":12, "pattern":"^\\d{12}$" }, + "AccessPointAlreadyOwnedByYou":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    An error code indicating that an access point with that name already exists in the Amazon Web Services Region in your Amazon Web Services account.

    " + }, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    An access point with that name already exists in the Amazon Web Services Region in your Amazon Web Services account.

    ", + "exception":true + }, + "AccessPointPolicy":{ + "type":"string", + "max":200000, + "min":1 + }, "ActiveDirectoryBackupAttributes":{ "type":"structure", "members":{ @@ -1434,6 +1505,76 @@ }, "documentation":"

    Used to specify the configuration options for an FSx for ONTAP volume's storage aggregate or aggregates.

    " }, + "CreateAndAttachS3AccessPointOpenZFSConfiguration":{ + "type":"structure", + "required":[ + "VolumeId", + "FileSystemIdentity" + ], + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

    The ID of the FSx for OpenZFS volume to which you want the S3 access point attached.

    " + }, + "FileSystemIdentity":{ + "shape":"OpenZFSFileSystemIdentity", + "documentation":"

    Specifies the file system user identity to use for authorizing file read and write requests that are made using this S3 access point.

    " + } + }, + "documentation":"

    Specifies the FSx for OpenZFS volume that the S3 access point will be attached to, and the file system user identity.

    " + }, + "CreateAndAttachS3AccessPointRequest":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "Name":{ + "shape":"S3AccessPointAttachmentName", + "documentation":"

    The name you want to assign to this S3 access point.

    " + }, + "Type":{ + "shape":"S3AccessPointAttachmentType", + "documentation":"

    The type of S3 access point you want to create. Only OpenZFS is supported.

    " + }, + "OpenZFSConfiguration":{ + "shape":"CreateAndAttachS3AccessPointOpenZFSConfiguration", + "documentation":"

    Specifies the configuration to use when creating and attaching an S3 access point to an FSx for OpenZFS volume.

    " + }, + "S3AccessPoint":{ + "shape":"CreateAndAttachS3AccessPointS3Configuration", + "documentation":"

    Specifies the virtual private cloud (VPC) configuration if you're creating an access point that is restricted to a VPC. For more information, see Creating access points restricted to a virtual private cloud.

    " + } + } + }, + "CreateAndAttachS3AccessPointResponse":{ + "type":"structure", + "members":{ + "S3AccessPointAttachment":{ + "shape":"S3AccessPointAttachment", + "documentation":"

    Describes the configuration of the S3 access point created.

    " + } + } + }, + "CreateAndAttachS3AccessPointS3Configuration":{ + "type":"structure", + "members":{ + "VpcConfiguration":{ + "shape":"S3AccessPointVpcConfiguration", + "documentation":"

    If included, Amazon S3 restricts access to this S3 access point to requests made from the specified virtual private cloud (VPC).

    " + }, + "Policy":{ + "shape":"AccessPointPolicy", + "documentation":"

    Specifies an access policy to associate with the S3 access point configuration. For more information, see Configuring IAM policies for using access points in the Amazon Simple Storage Service User Guide.

    " + } + }, + "documentation":"

    Used to create an S3 access point that accepts requests only from a virtual private cloud (VPC) to restrict data access to a private network.

    " + }, "CreateBackupRequest":{ "type":"structure", "members":{ @@ -1676,7 +1817,7 @@ "LustreConfiguration":{"shape":"CreateFileSystemLustreConfiguration"}, "StorageType":{ "shape":"StorageType", - "documentation":"

    Sets the storage type for the Windows or OpenZFS file system that you're creating from a backup. Valid values are SSD and HDD.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.

    The default value is SSD.

    HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.

    " + "documentation":"

    Sets the storage type for the Windows, OpenZFS, or Lustre file system that you're creating from a backup. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the Persistent_2 deployment type.

    The default value is SSD.

    HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.

    " }, "KmsKeyId":{"shape":"KmsKeyId"}, "FileSystemTypeVersion":{ @@ -1725,7 +1866,7 @@ }, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

    Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

    If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

    Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

    (Default = SCRATCH_1)

    " + "documentation":"

    (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available.

    Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports the SSD and Intelligent-Tiering storage classes. You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see Deployment and storage class options for FSx for Lustre file systems in the Amazon FSx for Lustre User Guide.

    If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails.

    Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide.

    (Default = SCRATCH_1)

    " }, "AutoImportPolicy":{ "shape":"AutoImportPolicyType", @@ -1733,7 +1874,7 @@ }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", - "documentation":"

    Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision.

    Valid values:

    • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

    • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

    • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

    " + "documentation":"

    Required with PERSISTENT_1 and PERSISTENT_2 deployment types using an SSD or HDD storage class, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision.

    Valid values:

    • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

    • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

    • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

    " }, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, "AutomaticBackupRetentionDays":{ @@ -1767,6 +1908,14 @@ "MetadataConfiguration":{ "shape":"CreateFileSystemLustreMetadataConfiguration", "documentation":"

    The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    Specifies the throughput of an FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps). Valid values are 4000 MBps or multiples of 4000 MBps. You pay for the amount of throughput that you provision.

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Specifies the optional provisioned SSD read cache on FSx for Lustre file systems that use the Intelligent-Tiering storage class. Required when StorageType is set to INTELLIGENT_TIERING.

    " } }, "documentation":"

    The Lustre configuration for the file system being created.

    The following parameters are not supported for file systems with a data repository association created with .

    • AutoImportPolicy

    • ExportPath

    • ImportedFileChunkSize

    • ImportPath

    " @@ -1777,11 +1926,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity.

    " + "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • In AUTOMATIC mode (supported only on SSD file systems), FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system.

    " } }, "documentation":"

    The Lustre metadata performance configuration for the creation of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration uses a Metadata IOPS value to set the maximum rate of metadata disk IOPS supported by the file system.

    After creation, the file system supports increasing metadata performance. For more information on Metadata IOPS, see Lustre metadata performance configuration in the Amazon FSx for Lustre User Guide.

    " @@ -1855,7 +2004,7 @@ }, "ThroughputCapacity":{ "shape":"MegabytesPerSecond", - "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " + "documentation":"

    Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType that you choose, as follows:

    • For MULTI_AZ_1 and SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps.

    • For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps.

    You pay for additional throughput capacity that you provision.

    " }, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DiskIopsConfiguration":{"shape":"DiskIopsConfiguration"}, @@ -1869,7 +2018,7 @@ }, "EndpointIpAddressRange":{ "shape":"IpAddressRange", - "documentation":"

    (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.

    " + "documentation":"

    (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.

    " }, "RouteTableIds":{ "shape":"RouteTableIds", @@ -1904,7 +2053,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

    Sets the storage class for the file system that you're creating. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type.

    Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide, Multiple storage options in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.

    " + "documentation":"

    Sets the storage class for the file system that you're creating. Valid values are SSD, HDD, and INTELLIGENT_TIERING.

    • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.

    • Set to HDD to use hard disk drive storage, which is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types.

    • Set to INTELLIGENT_TIERING to use fully elastic, intelligently-tiered storage. Intelligent-Tiering is only available for OpenZFS file systems with the Multi-AZ deployment type and for Lustre file systems with the PERSISTENT_2 deployment type.

    Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide, FSx for Lustre storage classes in the FSx for Lustre User Guide, and Working with Intelligent-Tiering in the Amazon FSx for OpenZFS User Guide.

    " }, "SubnetIds":{ "shape":"SubnetIds", @@ -2097,7 +2246,7 @@ }, "CopyTagsToSnapshots":{ "shape":"Flag", - "documentation":"

    A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value.

    " + "documentation":"

    A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If this value is set to true, and you do not specify any tags, all tags for the original volume are copied over to snapshots. If this value is set to true, and you do specify one or more tags, only the specified tags for the original volume are copied over to snapshots. If you specify one or more tags when creating a new snapshot, no tags are copied over from the original volume, regardless of this value.

    " }, "OriginSnapshot":{ "shape":"CreateOpenZFSOriginSnapshotConfiguration", @@ -2381,7 +2530,7 @@ "documentation":"

    The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

    " } }, - "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding scratch_1 deployment type.

    " + "documentation":"

    The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

    • CreateDataRepositoryAssociation

    • UpdateDataRepositoryAssociation

    • DescribeDataRepositoryAssociations

    Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and 2.15 file systems, excluding Intelligent-Tiering and scratch_1 file systems.

    " }, "DataRepositoryAssociationId":{ "type":"string", @@ -3185,11 +3334,35 @@ }, "documentation":"

    The response object for DescribeFileSystems operation.

    " }, - "DescribeSharedVpcConfigurationRequest":{ + "DescribeS3AccessPointAttachmentsRequest":{ "type":"structure", "members":{ + "Names":{ + "shape":"S3AccessPointAttachmentNames", + "documentation":"

    The names of the S3 access point attachments whose descriptions you want to retrieve.

    " + }, + "Filters":{ + "shape":"S3AccessPointAttachmentsFilters", + "documentation":"

    Enter a filter Name and Values pair to view a select set of S3 access point attachments.

    " + }, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} } }, + "DescribeS3AccessPointAttachmentsResponse":{ + "type":"structure", + "members":{ + "S3AccessPointAttachments":{ + "shape":"S3AccessPointAttachments", + "documentation":"

    Array of S3 access point attachments returned after a successful DescribeS3AccessPointAttachments operation.

    " + }, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeSharedVpcConfigurationRequest":{ + "type":"structure", + "members":{} + }, "DescribeSharedVpcConfigurationResponse":{ "type":"structure", "members":{ @@ -3278,6 +3451,33 @@ "NextToken":{"shape":"NextToken"} } }, + "DetachAndDeleteS3AccessPointRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "Name":{ + "shape":"S3AccessPointAttachmentName", + "documentation":"

    The name of the S3 access point attachment that you want to delete.

    " + } + } + }, + "DetachAndDeleteS3AccessPointResponse":{ + "type":"structure", + "members":{ + "Lifecycle":{ + "shape":"S3AccessPointAttachmentLifecycle", + "documentation":"

    The lifecycle status of the S3 access point attachment.

    " + }, + "Name":{ + "shape":"S3AccessPointAttachmentName", + "documentation":"

    The name of the S3 access point attachment being deleted.

    " + } + } + }, "DirectoryId":{ "type":"string", "max":12, @@ -3378,6 +3578,11 @@ "documentation":"

    Defines the minimum amount of time since last access for a file to be eligible for release. Only files that have been exported to S3 and that were last accessed or modified before this point-in-time are eligible to be released from the Amazon FSx for Lustre file system.

    " }, "EndTime":{"type":"timestamp"}, + "ErrorCode":{ + "type":"string", + "max":128, + "min":1 + }, "ErrorMessage":{ "type":"string", "documentation":"

    A detailed error message.

    ", @@ -3668,7 +3873,7 @@ }, "StorageType":{ "shape":"StorageType", - "documentation":"

    The type of storage the file system is using. If set to SSD, the file system uses solid state drive storage. If set to HDD, the file system uses hard disk drive storage.

    " + "documentation":"

    The type of storage the file system is using.

    • If set to SSD, the file system uses solid state drive storage.

    • If set to HDD, the file system uses hard disk drive storage.

    • If set to INTELLIGENT_TIERING, the file system uses fully elastic, intelligently-tiered storage.

    " }, "VpcId":{ "shape":"VpcId", @@ -3763,6 +3968,11 @@ }, "documentation":"

    A structure providing details of any failures that occurred.

    " }, + "FileSystemGID":{ + "type":"long", + "max":4294967295, + "min":0 + }, "FileSystemId":{ "type":"string", "documentation":"

    The globally unique ID of the file system, assigned by Amazon FSx.

    ", @@ -3795,11 +4005,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    The number of Metadata IOPS provisioned for the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    " + "documentation":"

    The number of Metadata IOPS provisioned for the file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for the file system.

    • In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for the file system.

    • In AUTOMATIC mode (supported only on SSD file systems), FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity.

    • In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system.

    " } }, "documentation":"

    The Lustre metadata performance configuration of an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. The configuration enables the file system to support increasing metadata performance.

    " @@ -3826,9 +4036,14 @@ "documentation":"

    No Amazon FSx file systems were found based upon supplied parameters.

    ", "exception":true }, + "FileSystemSecondaryGIDs":{ + "type":"list", + "member":{"shape":"FileSystemGID"}, + "max":15 + }, "FileSystemType":{ "type":"string", - "documentation":"

    The type of file system.

    ", + "documentation":"

    The type of Amazon FSx file system.

    ", "enum":[ "WINDOWS", "LUSTRE", @@ -3842,6 +4057,11 @@ "min":1, "pattern":"^[0-9](.[0-9]*)*$" }, + "FileSystemUID":{ + "type":"long", + "max":4294967295, + "min":0 + }, "FileSystems":{ "type":"list", "member":{"shape":"FileSystem"}, @@ -3967,6 +4187,18 @@ "exception":true, "fault":true }, + "InvalidAccessPoint":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    An error code indicating that the access point specified doesn't exist.

    " + }, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The access point specified doesn't exist.

    ", + "exception":true + }, "InvalidDataRepositoryType":{ "type":"structure", "members":{ @@ -4038,6 +4270,18 @@ "documentation":"

    The Region provided for SourceRegion is not valid or is in a different Amazon Web Services partition.

    ", "exception":true }, + "InvalidRequest":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    An error code indicating that the action or operation requested is invalid.

    " + }, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The action or operation requested is invalid. Verify that the action is typed correctly.

    ", + "exception":true + }, "InvalidSourceKmsKey":{ "type":"structure", "members":{ @@ -4150,7 +4394,7 @@ "DataRepositoryConfiguration":{"shape":"DataRepositoryConfiguration"}, "DeploymentType":{ "shape":"LustreDeploymentType", - "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see FSx for Lustre deployment options.

    The default is SCRATCH_1.

    " + "documentation":"

    The deployment type of the FSx for Lustre file system. Scratch deployment type is designed for temporary storage and shorter-term processing of data.

    SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1.

    The PERSISTENT_1 and PERSISTENT_2 deployment type is used for longer-term storage and workloads and encryption of data in transit. PERSISTENT_2 offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see Deployment and storage class options for FSx for Lustre file systems.

    The default is SCRATCH_1.

    " }, "PerUnitStorageThroughput":{ "shape":"PerUnitStorageThroughput", @@ -4189,6 +4433,14 @@ "EfaEnabled":{ "shape":"Flag", "documentation":"

    Specifies whether Elastic Fabric Adapter (EFA) and GPUDirect Storage (GDS) support is enabled for the Amazon FSx for Lustre file system.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    The throughput of an Amazon FSx for Lustre file system using the Intelligent-Tiering storage class, measured in megabytes per second (MBps).

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Required when StorageType is set to INTELLIGENT_TIERING. Specifies the optional provisioned SSD read cache.

    " } }, "documentation":"

    The configuration for the Amazon FSx for Lustre file system.

    " @@ -4240,6 +4492,28 @@ "member":{"shape":"LustreNoSquashNid"}, "max":64 }, + "LustreReadCacheConfiguration":{ + "type":"structure", + "members":{ + "SizingMode":{ + "shape":"LustreReadCacheSizingMode", + "documentation":"

    Specifies how the provisioned SSD read cache is sized, as follows:

    • Set to NO_CACHE if you do not want to use an SSD read cache with your Intelligent-Tiering file system.

    • Set to USER_PROVISIONED to specify the exact size of your SSD read cache.

    • Set to PROPORTIONAL_TO_THROUGHPUT_CAPACITY to have your SSD read cache automatically sized based on your throughput capacity.

    " + }, + "SizeGiB":{ + "shape":"StorageCapacity", + "documentation":"

    Required if SizingMode is set to USER_PROVISIONED. Specifies the size of the file system's SSD read cache, in gibibytes (GiB).

    The SSD read cache size is distributed across provisioned file servers in your file system. Intelligent-Tiering file systems support a minimum of 32 GiB and maximum of 131072 GiB for SSD read cache size for every 4,000 MB/s of throughput capacity provisioned.

    " + } + }, + "documentation":"

    The configuration for the optional provisioned SSD read cache on Amazon FSx for Lustre file systems that use the Intelligent-Tiering storage class.

    " + }, + "LustreReadCacheSizingMode":{ + "type":"string", + "enum":[ + "NO_CACHE", + "USER_PROVISIONED", + "PROPORTIONAL_TO_THROUGHPUT_CAPACITY" + ] + }, "LustreRootSquash":{ "type":"string", "max":21, @@ -4657,6 +4931,25 @@ }, "documentation":"

    The configuration for the Amazon FSx for OpenZFS file system.

    " }, + "OpenZFSFileSystemIdentity":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"OpenZFSFileSystemUserType", + "documentation":"

    Specifies the FSx for OpenZFS user identity type; accepts only POSIX.

    " + }, + "PosixUser":{ + "shape":"OpenZFSPosixFileSystemUser", + "documentation":"

    Specifies the UID and GIDs of the file system POSIX user.

    " + } + }, + "documentation":"

    Specifies the file system user identity that will be used for authorizing all file access requests that are made using the S3 access point.

    " + }, + "OpenZFSFileSystemUserType":{ + "type":"string", + "enum":["POSIX"] + }, "OpenZFSNfsExport":{ "type":"structure", "required":["ClientConfigurations"], @@ -4696,6 +4989,28 @@ }, "documentation":"

    The snapshot configuration used when creating an Amazon FSx for OpenZFS volume from a snapshot.

    " }, + "OpenZFSPosixFileSystemUser":{ + "type":"structure", + "required":[ + "Uid", + "Gid" + ], + "members":{ + "Uid":{ + "shape":"FileSystemUID", + "documentation":"

    The UID of the file system user.

    " + }, + "Gid":{ + "shape":"FileSystemGID", + "documentation":"

    The GID of the file system user.

    " + }, + "SecondaryGids":{ + "shape":"FileSystemSecondaryGIDs", + "documentation":"

    The list of secondary GIDs for the file system user.

    " + } + }, + "documentation":"

    The FSx for OpenZFS file system user that is used for authorizing all file access requests that are made using the S3 access point.

    " + }, "OpenZFSQuotaType":{ "type":"string", "enum":[ @@ -4715,7 +5030,7 @@ "documentation":"

    Required if SizingMode is set to USER_PROVISIONED. Specifies the size of the file system's SSD read cache, in gibibytes (GiB).

    " } }, - "documentation":"

    The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.

    " + "documentation":"

    The configuration for the optional provisioned SSD read cache on Amazon FSx for OpenZFS file systems that use the Intelligent-Tiering storage class.

    " }, "OpenZFSReadCacheSizingMode":{ "type":"string", @@ -5049,6 +5364,158 @@ "member":{"shape":"RouteTableId"}, "max":50 }, + "S3AccessPoint":{ + "type":"structure", + "members":{ + "ResourceARN":{ + "shape":"GeneralARN", + "documentation":"

    The S3 access point's ARN.

    " + }, + "Alias":{ + "shape":"S3AccessPointAlias", + "documentation":"

    The S3 access point's alias.

    " + }, + "VpcConfiguration":{ + "shape":"S3AccessPointVpcConfiguration", + "documentation":"

    The S3 access point's virtual private cloud (VPC) configuration.

    " + } + }, + "documentation":"

    Describes the S3 access point configuration of the S3 access point attachment.

    " + }, + "S3AccessPointAlias":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[0-9a-z\\\\-]{1,63}" + }, + "S3AccessPointAttachment":{ + "type":"structure", + "members":{ + "Lifecycle":{ + "shape":"S3AccessPointAttachmentLifecycle", + "documentation":"

    The lifecycle status of the S3 access point attachment. The lifecycle can have the following values:

    • AVAILABLE - the S3 access point attachment is available for use

    • CREATING - Amazon FSx is creating the S3 access point and attachment

    • DELETING - Amazon FSx is deleting the S3 access point and attachment

    • FAILED - The S3 access point attachment is in a failed state. Delete and detach the S3 access point attachment, and create a new one.

    • UPDATING - Amazon FSx is updating the S3 access point attachment

    " + }, + "LifecycleTransitionReason":{"shape":"LifecycleTransitionReason"}, + "CreationTime":{"shape":"CreationTime"}, + "Name":{ + "shape":"S3AccessPointAttachmentName", + "documentation":"

    The name of the S3 access point attachment; also used for the name of the S3 access point.

    " + }, + "Type":{ + "shape":"S3AccessPointAttachmentType", + "documentation":"

    The type of Amazon FSx volume that the S3 access point is attached to.

    " + }, + "OpenZFSConfiguration":{ + "shape":"S3AccessPointOpenZFSConfiguration", + "documentation":"

    The OpenZFSConfiguration of the S3 access point attachment.

    " + }, + "S3AccessPoint":{ + "shape":"S3AccessPoint", + "documentation":"

    The S3 access point configuration of the S3 access point attachment.

    " + } + }, + "documentation":"

    An S3 access point attached to an Amazon FSx volume.

    " + }, + "S3AccessPointAttachmentLifecycle":{ + "type":"string", + "enum":[ + "AVAILABLE", + "CREATING", + "DELETING", + "UPDATING", + "FAILED" + ] + }, + "S3AccessPointAttachmentName":{ + "type":"string", + "max":50, + "min":3, + "pattern":"^(?=[a-z0-9])[a-z0-9-]{1,48}[a-z0-9]$" + }, + "S3AccessPointAttachmentNames":{ + "type":"list", + "member":{"shape":"S3AccessPointAttachmentName"}, + "max":50 + }, + "S3AccessPointAttachmentNotFound":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    The access point specified was not found.

    ", + "exception":true + }, + "S3AccessPointAttachmentType":{ + "type":"string", + "enum":["OPENZFS"] + }, + "S3AccessPointAttachments":{ + "type":"list", + "member":{"shape":"S3AccessPointAttachment"}, + "max":1000 + }, + "S3AccessPointAttachmentsFilter":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"S3AccessPointAttachmentsFilterName", + "documentation":"

    The name of the filter.

    " + }, + "Values":{ + "shape":"S3AccessPointAttachmentsFilterValues", + "documentation":"

    The values of the filter.

    " + } + }, + "documentation":"

    A set of Name and Values pairs used to view a select set of S3 access point attachments.

    " + }, + "S3AccessPointAttachmentsFilterName":{ + "type":"string", + "enum":[ + "file-system-id", + "volume-id", + "type" + ] + }, + "S3AccessPointAttachmentsFilterValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$" + }, + "S3AccessPointAttachmentsFilterValues":{ + "type":"list", + "member":{"shape":"S3AccessPointAttachmentsFilterValue"}, + "max":20 + }, + "S3AccessPointAttachmentsFilters":{ + "type":"list", + "member":{"shape":"S3AccessPointAttachmentsFilter"}, + "max":2 + }, + "S3AccessPointOpenZFSConfiguration":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

    The ID of the FSx for OpenZFS volume that the S3 access point is attached to.

    " + }, + "FileSystemIdentity":{ + "shape":"OpenZFSFileSystemIdentity", + "documentation":"

    The file system identity used to authorize file access requests made using the S3 access point.

    " + } + }, + "documentation":"

    Describes the FSx for OpenZFS attachment configuration of an S3 access point attachment.

    " + }, + "S3AccessPointVpcConfiguration":{ + "type":"structure", + "members":{ + "VpcId":{ + "shape":"VpcId", + "documentation":"

    Specifies the virtual private cloud (VPC) for the S3 access point VPC configuration, if one exists.

    " + } + }, + "documentation":"

    If included, Amazon S3 restricts access to this access point to requests from the specified virtual private cloud (VPC).

    " + }, "S3DataRepositoryConfiguration":{ "type":"structure", "members":{ @@ -5178,7 +5645,7 @@ }, "ServiceLimit":{ "type":"string", - "documentation":"

    The types of limits on your service utilization. Limits include file system count, total throughput capacity, total storage, and total user-initiated backups. These limits apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by contacting Amazon Web Services Support.

    ", + "documentation":"

    The types of limits on your service utilization. Limits include file system count, total throughput capacity, total storage, and total user-initiated backups. These limits apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by contacting Amazon Web Services Support.

    ", "enum":[ "FILE_SYSTEM_COUNT", "TOTAL_THROUGHPUT_CAPACITY", @@ -5202,7 +5669,7 @@ }, "Message":{"shape":"ErrorMessage"} }, - "documentation":"

    An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.

    ", + "documentation":"

    An error indicating that a particular service limit was exceeded. You can increase some service limits by contacting Amazon Web Services Support.

    ", "exception":true }, "SizeInBytes":{ @@ -5692,8 +6159,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response object for the TagResource operation.

    " }, "TagValue":{ @@ -5721,6 +6187,11 @@ "member":{"shape":"TaskId"}, "max":50 }, + "ThroughputCapacityMbps":{ + "type":"integer", + "max":2000000, + "min":4000 + }, "ThroughputCapacityPerHAPair":{ "type":"integer", "max":6144, @@ -5749,6 +6220,18 @@ "NONE" ] }, + "TooManyAccessPoints":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

    An error code indicating that you have reached the maximum number of S3 access point attachments allowed for your account in this Amazon Web Services Region, or for the file system.

    " + }, + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

    You have reached the maximum number of S3 access point attachments allowed for your account in this Amazon Web Services Region, or for the file system. For more information, or to request an increase, see Service quotas on FSx resources in the FSx for OpenZFS User Guide.

    ", + "exception":true + }, "TotalConstituents":{ "type":"integer", "max":200, @@ -5796,8 +6279,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

    The response object for UntagResource action.

    " }, "UpdateDataRepositoryAssociationRequest":{ @@ -5900,6 +6382,14 @@ "MetadataConfiguration":{ "shape":"UpdateFileSystemLustreMetadataConfiguration", "documentation":"

    The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance.

    " + }, + "ThroughputCapacity":{ + "shape":"ThroughputCapacityMbps", + "documentation":"

    The throughput of an Amazon FSx for Lustre file system using an Intelligent-Tiering storage class, measured in megabytes per second (MBps). You can only increase your file system's throughput. Valid values are 4000 MBps or multiples of 4000 MBps.

    " + }, + "DataReadCacheConfiguration":{ + "shape":"LustreReadCacheConfiguration", + "documentation":"

    Specifies the optional provisioned SSD read cache on Amazon FSx for Lustre file systems that use the Intelligent-Tiering storage class.

    " } }, "documentation":"

    The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation.

    " @@ -5909,11 +6399,11 @@ "members":{ "Iops":{ "shape":"MetadataIops", - "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    " + "documentation":"

    (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system.

    • For SSD file systems, valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.

    • For Intelligent-Tiering file systems, valid values are 6000 and 12000.

    The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    " }, "Mode":{ "shape":"MetadataConfigurationMode", - "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • To increase the Metadata IOPS or to switch from AUTOMATIC mode, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    • To switch from USER_PROVISIONED mode, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.

      If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.

    " + "documentation":"

    The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.

    • To increase the Metadata IOPS or to switch an SSD file system from AUTOMATIC, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.

    • To switch from USER_PROVISIONED mode on an SSD file system, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops.

      • If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported.

      • AUTOMATIC mode is not supported on Intelligent-Tiering file systems. For Intelligent-Tiering file systems, use USER_PROVISIONED mode.

    " } }, "documentation":"

    The Lustre metadata performance configuration update for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. You can request an increase in your file system's Metadata IOPS and/or switch your file system's metadata configuration mode. For more information, see Managing metadata performance in the Amazon FSx for Lustre User Guide.

    " @@ -6475,7 +6965,7 @@ }, "WeeklyTime":{ "type":"string", - "documentation":"

    A recurring weekly time, in the format D:HH:MM.

    D is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see the ISO-8601 spec as described on Wikipedia.

    HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour.

    For example, 1:05:00 specifies maintenance at 5 AM Monday.

    ", + "documentation":"

    The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

    For example, 1:05:00 specifies maintenance at 5 AM Monday.

    ", "max":7, "min":7, "pattern":"^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$" diff --git a/services/gamelift/pom.xml b/services/gamelift/pom.xml index fcd8ee2caedb..959bdd8beb82 100644 --- a/services/gamelift/pom.xml +++ b/services/gamelift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT gamelift AWS Java SDK :: Services :: AWS GameLift diff --git a/services/gamelift/src/main/resources/codegen-resources/customization.config b/services/gamelift/src/main/resources/codegen-resources/customization.config index 48657632de66..b7c812e13681 100644 --- a/services/gamelift/src/main/resources/codegen-resources/customization.config +++ b/services/gamelift/src/main/resources/codegen-resources/customization.config @@ -25,6 +25,5 @@ "listAliases", "listBuilds", "listFleets" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/gamelift/src/main/resources/codegen-resources/service-2.json b/services/gamelift/src/main/resources/codegen-resources/service-2.json index a5d15c0ea0a4..bbd1ea239c14 100644 --- a/services/gamelift/src/main/resources/codegen-resources/service-2.json +++ b/services/gamelift/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in each ticket. Calls to this action are only valid for tickets that are in this status; calls for tickets not in this status result in an error.

    To register acceptance, specify the ticket ID, one or more players, and an acceptance response. When all players have accepted, Amazon GameLift advances the matchmaking tickets to status PLACING, and attempts to create a new game session for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. Each matchmaking ticket in the failed match is handled as follows:

    • If the ticket has one or more players who rejected the match or failed to respond, the ticket status is set CANCELLED and processing is terminated.

    • If all players in the ticket accepted the match, the ticket status is returned to SEARCHING to find a new match.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)

    " + "documentation":"

    Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.

    When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE. This is a trigger for your game to get acceptance from all players in each ticket. Calls to this action are only valid for tickets that are in this status; calls for tickets not in this status result in an error.

    To register acceptance, specify the ticket ID, one or more players, and an acceptance response. When all players have accepted, Amazon GameLift Servers advances the matchmaking tickets to status PLACING, and attempts to create a new game session for the match.

    If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. Each matchmaking ticket in the failed match is handled as follows:

    • If the ticket has one or more players who rejected the match or failed to respond, the ticket status is set CANCELLED and processing is terminated.

    • If all players in the ticket accepted the match, the ticket status is returned to SEARCHING to find a new match.

    Learn more

    Add FlexMatch to a game client

    FlexMatch events (reference)

    " }, "ClaimGameServer":{ "name":"ClaimGameServer", @@ -46,7 +46,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

    To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Add filter options to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed.

    When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

    If you try to claim a specific game server, this request will fail in the following cases:

    • If the game server utilization status is UTILIZED.

    • If the game server claim status is CLAIMED.

    • If the game server is running on an instance in DRAINING status and the provided filter option does not allow placing on DRAINING instances.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift Servers FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server.

    To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift Servers FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Add filter options to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed.

    When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable.

    If you try to claim a specific game server, this request will fail in the following cases:

    • If the game server utilization status is UTILIZED.

    • If the game server claim status is CLAIMED.

    • If the game server is running on an instance in DRAINING status and the provided filter option does not allow placing on DRAINING instances.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "CreateAlias":{ "name":"CreateAlias", @@ -64,7 +64,7 @@ {"shape":"LimitExceededException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

    Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

    Amazon GameLift supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

    To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

    Related actions

    All APIs by task

    " + "documentation":"

    Creates an alias for a fleet. In most situations, you can use an alias ID in place of a fleet ID. An alias provides a level of abstraction for a fleet that is useful when redirecting player traffic from one fleet to another, such as when updating your game build.

    Amazon GameLift Servers supports two types of routing strategies for aliases: simple and terminal. A simple alias points to an active fleet. A terminal alias is used to display messaging or link to a URL instead of routing players to an active fleet. For example, you might use a terminal alias when a game version is no longer supported and you want to direct players to an upgrade site.

    To create a fleet alias, specify an alias name, routing strategy, and optional description. Each simple alias can point to only one fleet, but a fleet can have multiple aliases. If successful, a new alias record is returned, including an alias ID and an ARN. You can reassign an alias to another fleet by calling UpdateAlias.

    Related actions

    All APIs by task

    " }, "CreateBuild":{ "name":"CreateBuild", @@ -81,7 +81,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift.

    When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource.

    You can use the CreateBuild operation in the following scenarios:

    • Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build.

    • Upload your build files to a Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them.

    If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

    Learn more

    Uploading Your Game

    Create a Build with Files in Amazon S3

    All APIs by task

    " + "documentation":"

    Creates a new Amazon GameLift Servers build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift Servers.

    When setting up a new game build for Amazon GameLift Servers, we recommend using the CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Servers Amazon S3 location, and (2) it creates a new build resource.

    You can use the CreateBuild operation in the following scenarios:

    • Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift Servers access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build.

    • Upload your build files to an Amazon GameLift Servers Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Servers Amazon S3 location, you can't update them.

    If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it.

    Learn more

    Uploading Your Game

    Create a Build with Files in Amazon S3

    All APIs by task

    " }, "CreateContainerFleet":{ "name":"CreateContainerFleet", @@ -100,7 +100,7 @@ {"shape":"TaggingFailedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a managed fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host your containerized game servers. Use this operation to define how to deploy a container architecture onto each fleet instance and configure fleet settings. You can create a container fleet in any Amazon Web Services Regions that Amazon GameLift supports for multi-location fleets. A container fleet can be deployed to a single location or multiple locations. Container fleets are deployed with Amazon Linux 2023 as the instance operating system.

    Define the fleet's container architecture using container group definitions. Each fleet can have one of the following container group types:

    • The game server container group runs your game server build and dependent software. Amazon GameLift deploys one or more replicas of this container group to each fleet instance. The number of replicas depends on the computing capabilities of the fleet instance in use.

    • An optional per-instance container group might be used to run other software that only needs to run once per instance, such as background services, logging, or test processes. One per-instance container group is deployed to each fleet instance.

    Each container group can include the definition for one or more containers. A container definition specifies a container image that is stored in an Amazon Elastic Container Registry (Amazon ECR) public or private repository.

    Request options

    Use this operation to make the following types of requests. Most fleet settings have default values, so you can create a working fleet with a minimal configuration and default values, which you can customize later.

    • Create a fleet with no container groups. You can configure a container fleet and then add container group definitions later. In this scenario, no fleet instances are deployed, and the fleet can't host game sessions until you add a game server container group definition. Provide the following required parameter values:

      • FleetRoleArn

    • Create a fleet with a game server container group. Provide the following required parameter values:

      • FleetRoleArn

      • GameServerContainerGroupDefinitionName

    • Create a fleet with a game server container group and a per-instance container group. Provide the following required parameter values:

      • FleetRoleArn

      • GameServerContainerGroupDefinitionName

      • PerInstanceContainerGroupDefinitionName

    Results

    If successful, this operation creates a new container fleet resource, places it in PENDING status, and initiates the fleet creation workflow. For fleets with container groups, this workflow starts a fleet deployment and transitions the status to ACTIVE. Fleets without a container group are placed in CREATED status.

    You can update most of the properties of a fleet, including container group definitions, and deploy the update across all fleet instances. Use a fleet update to deploy a new game server version update across the container fleet.

    " + "documentation":"

    Creates a managed fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host your containerized game servers. Use this operation to define how to deploy a container architecture onto each fleet instance and configure fleet settings. You can create a container fleet in any Amazon Web Services Regions that Amazon GameLift Servers supports for multi-location fleets. A container fleet can be deployed to a single location or multiple locations. Container fleets are deployed with Amazon Linux 2023 as the instance operating system.

    Define the fleet's container architecture using container group definitions. Each fleet can have one of the following container group types:

    • The game server container group runs your game server build and dependent software. Amazon GameLift Servers deploys one or more replicas of this container group to each fleet instance. The number of replicas depends on the computing capabilities of the fleet instance in use.

    • An optional per-instance container group might be used to run other software that only needs to run once per instance, such as background services, logging, or test processes. One per-instance container group is deployed to each fleet instance.

    Each container group can include the definition for one or more containers. A container definition specifies a container image that is stored in an Amazon Elastic Container Registry (Amazon ECR) public or private repository.

    Request options

    Use this operation to make the following types of requests. Most fleet settings have default values, so you can create a working fleet with a minimal configuration and default values, which you can customize later.

    • Create a fleet with no container groups. You can configure a container fleet and then add container group definitions later. In this scenario, no fleet instances are deployed, and the fleet can't host game sessions until you add a game server container group definition. Provide the following required parameter values:

      • FleetRoleArn

    • Create a fleet with a game server container group. Provide the following required parameter values:

      • FleetRoleArn

      • GameServerContainerGroupDefinitionName

    • Create a fleet with a game server container group and a per-instance container group. Provide the following required parameter values:

      • FleetRoleArn

      • GameServerContainerGroupDefinitionName

      • PerInstanceContainerGroupDefinitionName

    Results

    If successful, this operation creates a new container fleet resource, places it in PENDING status, and initiates the fleet creation workflow. For fleets with container groups, this workflow starts a fleet deployment and transitions the status to ACTIVE. Fleets without a container group are placed in CREATED status.

    You can update most of the properties of a fleet, including container group definitions, and deploy the update across all fleet instances. Use a fleet update to deploy a new game server version update across the container fleet.

    " }, "CreateContainerGroupDefinition":{ "name":"CreateContainerGroupDefinition", @@ -119,7 +119,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a ContainerGroupDefinition that describes a set of containers for hosting your game server with Amazon GameLift managed containers hosting. An Amazon GameLift container group is similar to a container task or pod. Use container group definitions when you create a container fleet with CreateContainerFleet.

    A container group definition determines how Amazon GameLift deploys your containers to each instance in a container fleet. You can maintain multiple versions of a container group definition.

    There are two types of container groups:

    • A game server container group has the containers that run your game server application and supporting software. A game server container group can have these container types:

      • Game server container. This container runs your game server. You can define one game server container in a game server container group.

      • Support container. This container runs software in parallel with your game server. You can define up to 8 support containers in a game server group.

      When building a game server container group definition, you can choose to bundle your game server executable and all dependent software into a single game server container. Alternatively, you can separate the software into one game server container and one or more support containers.

      On a container fleet instance, a game server container group can be deployed multiple times (depending on the compute resources of the instance). This means that all containers in the container group are replicated together.

    • A per-instance container group has containers for processes that aren't replicated on a container fleet instance. This might include background services, logging, test processes, or processes that need to persist independently of the game server container group. When building a per-instance container group, you can define up to 10 support containers.

    This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift for help setting the appropriate permissions.

    Request options

    Use this operation to make the following types of requests. You can specify values for the minimum required parameters and customize optional values later.

    • Create a game server container group definition. Provide the following required parameter values:

      • Name

      • ContainerGroupType (GAME_SERVER)

      • OperatingSystem (omit to use default value)

      • TotalMemoryLimitMebibytes (omit to use default value)

      • TotalVcpuLimit (omit to use default value)

      • At least one GameServerContainerDefinition

        • ContainerName

        • ImageUrl

        • PortConfiguration

        • ServerSdkVersion (omit to use default value)

    • Create a per-instance container group definition. Provide the following required parameter values:

      • Name

      • ContainerGroupType (PER_INSTANCE)

      • OperatingSystem (omit to use default value)

      • TotalMemoryLimitMebibytes (omit to use default value)

      • TotalVcpuLimit (omit to use default value)

      • At least one SupportContainerDefinition

        • ContainerName

        • ImageUrl

    Results

    If successful, this request creates a ContainerGroupDefinition resource and assigns a unique ARN value. You can update most properties of a container group definition by calling UpdateContainerGroupDefinition, and optionally save the update as a new version.

    " + "documentation":"

    Creates a ContainerGroupDefinition that describes a set of containers for hosting your game server with Amazon GameLift Servers managed containers hosting. An Amazon GameLift Servers container group is similar to a container task or pod. Use container group definitions when you create a container fleet with CreateContainerFleet.

    A container group definition determines how Amazon GameLift Servers deploys your containers to each instance in a container fleet. You can maintain multiple versions of a container group definition.

    There are two types of container groups:

    • A game server container group has the containers that run your game server application and supporting software. A game server container group can have these container types:

      • Game server container. This container runs your game server. You can define one game server container in a game server container group.

      • Support container. This container runs software in parallel with your game server. You can define up to 8 support containers in a game server group.

      When building a game server container group definition, you can choose to bundle your game server executable and all dependent software into a single game server container. Alternatively, you can separate the software into one game server container and one or more support containers.

      On a container fleet instance, a game server container group can be deployed multiple times (depending on the compute resources of the instance). This means that all containers in the container group are replicated together.

    • A per-instance container group has containers for processes that aren't replicated on a container fleet instance. This might include background services, logging, test processes, or processes that need to persist independently of the game server container group. When building a per-instance container group, you can define up to 10 support containers.

    This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift Servers for help setting the appropriate permissions.

    Request options

    Use this operation to make the following types of requests. You can specify values for the minimum required parameters and customize optional values later.

    • Create a game server container group definition. Provide the following required parameter values:

      • Name

      • ContainerGroupType (GAME_SERVER)

      • OperatingSystem (omit to use default value)

      • TotalMemoryLimitMebibytes (omit to use default value)

      • TotalVcpuLimit (omit to use default value)

      • At least one GameServerContainerDefinition

        • ContainerName

        • ImageUrl

        • PortConfiguration

        • ServerSdkVersion (omit to use default value)

    • Create a per-instance container group definition. Provide the following required parameter values:

      • Name

      • ContainerGroupType (PER_INSTANCE)

      • OperatingSystem (omit to use default value)

      • TotalMemoryLimitMebibytes (omit to use default value)

      • TotalVcpuLimit (omit to use default value)

      • At least one SupportContainerDefinition

        • ContainerName

        • ImageUrl

    Results

    If successful, this request creates a ContainerGroupDefinition resource and assigns a unique ARN value. You can update most properties of a container group definition by calling UpdateContainerGroupDefinition, and optionally save the update as a new version.

    " }, "CreateFleet":{ "name":"CreateFleet", @@ -140,7 +140,7 @@ {"shape":"TaggingFailedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a fleet of compute resources to host your game servers. Use this operation to set up the following types of fleets based on compute type:

    Managed EC2 fleet

    An EC2 fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your game server build is deployed to each fleet instance. Amazon GameLift manages the fleet's instances and controls the lifecycle of game server processes, which host game sessions for players. EC2 fleets can have instances in multiple locations. Each instance in the fleet is designated a Compute.

    To create an EC2 fleet, provide these required parameters:

    • Either BuildId or ScriptId

    • ComputeType set to EC2 (the default value)

    • EC2InboundPermissions

    • EC2InstanceType

    • FleetType

    • Name

    • RuntimeConfiguration with at least one ServerProcesses configuration

    If successful, this operation creates a new fleet resource and places it in NEW status while Amazon GameLift initiates the fleet creation workflow. To debug your fleet, fetch logs, view performance metrics or other actions on the fleet, create a development fleet with port 22/3389 open. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished.

    When the fleet status is ACTIVE, you can adjust capacity settings and turn autoscaling on/off for each location.

    Anywhere fleet

    An Anywhere fleet represents compute resources that are not owned or managed by Amazon GameLift. You might create an Anywhere fleet with your local machine for testing, or use one to host game servers with on-premises hardware or other game hosting solutions.

    To create an Anywhere fleet, provide these required parameters:

    • ComputeType set to ANYWHERE

    • Locations specifying a custom location

    • Name

    If successful, this operation creates a new fleet resource and places it in ACTIVE status. You can register computes with a fleet in ACTIVE status.

    Learn more

    Setting up fleets

    Debug fleet creation issues

    Multi-location fleets

    " + "documentation":"

    Creates a fleet of compute resources to host your game servers. Use this operation to set up the following types of fleets based on compute type:

    Managed EC2 fleet

    An EC2 fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your game server build is deployed to each fleet instance. Amazon GameLift Servers manages the fleet's instances and controls the lifecycle of game server processes, which host game sessions for players. EC2 fleets can have instances in multiple locations. Each instance in the fleet is designated a Compute.

    To create an EC2 fleet, provide these required parameters:

    • Either BuildId or ScriptId

    • ComputeType set to EC2 (the default value)

    • EC2InboundPermissions

    • EC2InstanceType

    • FleetType

    • Name

    • RuntimeConfiguration with at least one ServerProcesses configuration

    If successful, this operation creates a new fleet resource and places it in NEW status while Amazon GameLift Servers initiates the fleet creation workflow. To debug your fleet, fetch logs, view performance metrics or other actions on the fleet, create a development fleet with port 22/3389 open. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished.

    When the fleet status is ACTIVE, you can adjust capacity settings and turn autoscaling on/off for each location.

    Anywhere fleet

    An Anywhere fleet represents compute resources that are not owned or managed by Amazon GameLift Servers. You might create an Anywhere fleet with your local machine for testing, or use one to host game servers with on-premises hardware or other game hosting solutions.

    To create an Anywhere fleet, provide these required parameters:

    • ComputeType set to ANYWHERE

    • Locations specifying a custom location

    • Name

    If successful, this operation creates a new fleet resource and places it in ACTIVE status. You can register computes with a fleet in ACTIVE status.

    Learn more

    Setting up fleets

    Debug fleet creation issues

    Multi-location fleets

    " }, "CreateFleetLocations":{ "name":"CreateFleetLocations", @@ -161,7 +161,7 @@ {"shape":"ConflictException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Adds remote locations to an EC2 fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings.

    You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations.

    To add fleet locations, specify the fleet to be updated and provide a list of one or more locations.

    If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents.

    Learn more

    Setting up fleets

    Update fleet locations

    Amazon GameLift service locations for managed hosting.

    " + "documentation":"

    Adds remote locations to an EC2 fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings.

    You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations.

    To add fleet locations, specify the fleet to be updated and provide a list of one or more locations.

    If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift Servers initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents.

    Learn more

    Setting up fleets

    Update fleet locations

    Amazon GameLift Servers service locations for managed hosting.

    " }, "CreateGameServerGroup":{ "name":"CreateGameServerGroup", @@ -178,7 +178,7 @@ {"shape":"InternalServiceException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Creates an Amazon GameLift FleetIQ game server group for managing game hosting on a collection of Amazon Elastic Compute Cloud instances for game hosting. This operation creates the game server group, creates an Auto Scaling group in your Amazon Web Services account, and establishes a link between the two groups. You can view the status of your game server groups in the Amazon GameLift console. Game server group metrics and events are emitted to Amazon CloudWatch.

    Before creating a new game server group, you must have the following:

    • An Amazon Elastic Compute Cloud launch template that specifies how to launch Amazon Elastic Compute Cloud instances with your game server build. For more information, see Launching an Instance from a Launch Template in the Amazon Elastic Compute Cloud User Guide.

    • An IAM role that extends limited access to your Amazon Web Services account to allow Amazon GameLift FleetIQ to create and interact with the Auto Scaling group. For more information, see Create IAM roles for cross-service interaction in the Amazon GameLift FleetIQ Developer Guide.

    To create a new game server group, specify a unique group name, IAM role and Amazon Elastic Compute Cloud launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on an Amazon GameLift FleetIQ metric.

    Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by Amazon GameLift FleetIQ as part of its balancing activities to optimize for availability and cost.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Creates an Amazon GameLift Servers FleetIQ game server group for managing game hosting on a collection of Amazon Elastic Compute Cloud instances for game hosting. This operation creates the game server group, creates an Auto Scaling group in your Amazon Web Services account, and establishes a link between the two groups. You can view the status of your game server groups in the Amazon GameLift Servers console. Game server group metrics and events are emitted to Amazon CloudWatch.

    Before creating a new game server group, you must have the following:

    • An Amazon Elastic Compute Cloud launch template that specifies how to launch Amazon Elastic Compute Cloud instances with your game server build. For more information, see Launching an Instance from a Launch Template in the Amazon Elastic Compute Cloud User Guide.

    • An IAM role that extends limited access to your Amazon Web Services account to allow Amazon GameLift Servers FleetIQ to create and interact with the Auto Scaling group. For more information, see Create IAM roles for cross-service interaction in the Amazon GameLift Servers FleetIQ Developer Guide.

    To create a new game server group, specify a unique group name, IAM role and Amazon Elastic Compute Cloud launch template, and provide a list of instance types that can be used in the group. You must also set initial maximum and minimum limits on the group's instance count. You can optionally set an Auto Scaling policy with target tracking based on an Amazon GameLift Servers FleetIQ metric.

    Once the game server group and corresponding Auto Scaling group are created, you have full access to change the Auto Scaling group's configuration as needed. Several properties that are set when creating a game server group, including maximum/minimum size and auto-scaling policy settings, must be updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling group properties are periodically updated by Amazon GameLift Servers FleetIQ as part of its balancing activities to optimize for availability and cost.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "CreateGameSession":{ "name":"CreateGameSession", @@ -201,7 +201,7 @@ {"shape":"IdempotentParameterMismatchException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

    You can use this operation in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

    If successful, Amazon GameLift initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Amazon GameLift retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Local.

    Learn more

    Start a game session

    All APIs by task

    " + "documentation":"

    Creates a multiplayer game session for players in a specific fleet location. This operation prompts an available server process to start a game session and retrieves connection information for the new game session. As an alternative, consider using the Amazon GameLift Servers game session placement feature with StartGameSessionPlacement, which uses the FleetIQ algorithm and queues to optimize the placement process.

    When creating a game session, you specify exactly where you want to place it and provide a set of game session configuration settings. The target fleet must be in ACTIVE status.

    You can use this operation in the following ways:

    • To create a game session on an instance in a fleet's home Region, provide a fleet or alias ID along with your game session configuration.

    • To create a game session on an instance in a fleet's remote location, provide a fleet or alias ID and a location name, along with your game session configuration.

    • To create a game session on an instance in an Anywhere fleet, specify the fleet's custom location.

    If successful, Amazon GameLift Servers initiates a workflow to start a new game session and returns a GameSession object containing the game session configuration and status. When the game session status is ACTIVE, it is updated with connection information and you can create player sessions for the game session. By default, newly created game sessions are open to new players. You can restrict new player access by using UpdateGameSession to change the game session's player session creation policy.

    Amazon GameLift Servers retains logs for active game sessions for 14 days. To access the logs, call GetGameSessionLogUrl to download the log files.

    Available in Amazon GameLift Servers Local.

    Learn more

    Start a game session

    All APIs by task

    " }, "CreateGameSessionQueue":{ "name":"CreateGameSessionQueue", @@ -219,7 +219,7 @@ {"shape":"NotFoundException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

    Creates a placement queue that processes requests for new game sessions. A queue uses FleetIQ algorithms to locate the best available placement locations for a new game session, and then prompts the game server process to start a new game session.

    A game session queue is configured with a set of destinations (Amazon GameLift fleets or aliases) that determine where the queue can place new game sessions. These destinations can span multiple Amazon Web Services Regions, can use different instance types, and can include both Spot and On-Demand fleets. If the queue includes multi-location fleets, the queue can place game sessions in any of a fleet's remote locations.

    You can configure a queue to determine how it selects the best available placement for a new game session. Queues can prioritize placement decisions based on a combination of location, hosting cost, and player latency. You can set up the queue to use the default prioritization or provide alternate instructions using PriorityConfiguration.

    Request options

    Use this operation to make these common types of requests.

    • Create a queue with the minimum required parameters.

      • Name

      • Destinations (This parameter isn't required, but a queue can't make placements without at least one destination.)

    • Create a queue with placement notification. Queues that have high placement activity must use a notification system, such as with Amazon Simple Notification Service (Amazon SNS) or Amazon CloudWatch.

      • Required parameters Name and Destinations

      • NotificationTarget

    • Create a queue with custom prioritization settings. These custom settings replace the default prioritization configuration for a queue.

      • Required parameters Name and Destinations

      • PriorityConfiguration

    • Create a queue with special rules for processing player latency data.

      • Required parameters Name and Destinations

      • PlayerLatencyPolicies

    Results

    If successful, this operation returns a new GameSessionQueue object with an assigned queue ARN. Use the queue's name or ARN when submitting new game session requests with StartGameSessionPlacement or StartMatchmaking.

    Learn more

    Design a game session queue

    Create a game session queue

    Related actions

    CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task

    " + "documentation":"

    Creates a placement queue that processes requests for new game sessions. A queue uses FleetIQ algorithms to locate the best available placement locations for a new game session, and then prompts the game server process to start a new game session.

    A game session queue is configured with a set of destinations (Amazon GameLift Servers fleets or aliases) that determine where the queue can place new game sessions. These destinations can span multiple Amazon Web Services Regions, can use different instance types, and can include both Spot and On-Demand fleets. If the queue includes multi-location fleets, the queue can place game sessions in any of a fleet's remote locations.

    You can configure a queue to determine how it selects the best available placement for a new game session. Queues can prioritize placement decisions based on a combination of location, hosting cost, and player latency. You can set up the queue to use the default prioritization or provide alternate instructions using PriorityConfiguration.

    Request options

    Use this operation to make these common types of requests.

    • Create a queue with the minimum required parameters.

      • Name

      • Destinations (This parameter isn't required, but a queue can't make placements without at least one destination.)

    • Create a queue with placement notification. Queues that have high placement activity must use a notification system, such as with Amazon Simple Notification Service (Amazon SNS) or Amazon CloudWatch.

      • Required parameters Name and Destinations

      • NotificationTarget

    • Create a queue with custom prioritization settings. These custom settings replace the default prioritization configuration for a queue.

      • Required parameters Name and Destinations

      • PriorityConfiguration

    • Create a queue with special rules for processing player latency data.

      • Required parameters Name and Destinations

      • PlayerLatencyPolicies

    Results

    If successful, this operation returns a new GameSessionQueue object with an assigned queue ARN. Use the queue's name or ARN when submitting new game session requests with StartGameSessionPlacement or StartMatchmaking.

    Learn more

    Design a game session queue

    Create a game session queue

    Related actions

    CreateGameSessionQueue | DescribeGameSessionQueues | UpdateGameSessionQueue | DeleteGameSessionQueue | All APIs by task

    " }, "CreateLocation":{ "name":"CreateLocation", @@ -255,7 +255,7 @@ {"shape":"UnsupportedRegionException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

    Defines a new matchmaking configuration for use with FlexMatch. Whether you are using FlexMatch with Amazon GameLift hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using Amazon GameLift hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

    To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without Amazon GameLift hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with Amazon GameLift hosting, you also need to identify the game session queue to use when starting a game session for the match.

    In addition, you must set up an Amazon Simple Notification Service topic to receive matchmaking notifications. Provide the topic ARN in the matchmaking configuration.

    Learn more

    Design a FlexMatch matchmaker

    Set up FlexMatch event notification

    " + "documentation":"

    Defines a new matchmaking configuration for use with FlexMatch. Whether you are using FlexMatch with Amazon GameLift Servers hosting or as a standalone matchmaking service, the matchmaking configuration sets out rules for matching players and forming teams. If you're also using Amazon GameLift Servers hosting, it defines how to start game sessions for each match. Your matchmaking system can use multiple configurations to handle different game scenarios. All matchmaking requests identify the matchmaking configuration to use and provide player attributes consistent with that configuration.

    To create a matchmaking configuration, you must provide the following: configuration name and FlexMatch mode (with or without Amazon GameLift Servers hosting); a rule set that specifies how to evaluate players and find acceptable matches; whether player acceptance is required; and the maximum time allowed for a matchmaking attempt. When using FlexMatch with Amazon GameLift Servers hosting, you also need to identify the game session queue to use when starting a game session for the match.

    In addition, you must set up an Amazon Simple Notification Service topic to receive matchmaking notifications. Provide the topic ARN in the matchmaking configuration.

    Learn more

    Design a FlexMatch matchmaker

    Set up FlexMatch event notification

    " }, "CreateMatchmakingRuleSet":{ "name":"CreateMatchmakingRuleSet", @@ -291,7 +291,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

    Reserves an open player slot in a game session for a player. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. You can add a group of players to a game session with CreatePlayerSessions .

    To create a player session, specify a game session ID, player ID, and optionally a set of player data.

    If successful, a slot is reserved in the game session for the player and a new PlayerSessions object is returned with a player session ID. The player references the player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the Amazon GameLift service. Player sessions cannot be updated.

    The maximum number of players per game session is 200. It is not adjustable.

    Related actions

    All APIs by task

    " + "documentation":"

    Reserves an open player slot in a game session for a player. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. You can add a group of players to a game session with CreatePlayerSessions .

    To create a player session, specify a game session ID, player ID, and optionally a set of player data.

    If successful, a slot is reserved in the game session for the player and a new PlayerSessions object is returned with a player session ID. The player references the player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the Amazon GameLift Servers service. Player sessions cannot be updated.

    The maximum number of players per game session is 200. It is not adjustable.

    Related actions

    All APIs by task

    " }, "CreatePlayerSessions":{ "name":"CreatePlayerSessions", @@ -310,7 +310,7 @@ {"shape":"InvalidRequestException"}, {"shape":"NotFoundException"} ], - "documentation":"

    Reserves open slots in a game session for a group of players. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. To add a single player to a game session, use CreatePlayerSession.

    To create player sessions, specify a game session ID and a list of player IDs. Optionally, provide a set of player data for each player ID.

    If successful, a slot is reserved in the game session for each player, and new PlayerSession objects are returned with player session IDs. Each player references their player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the Amazon GameLift service. Player sessions cannot be updated.

    The maximum number of players per game session is 200. It is not adjustable.

    Related actions

    All APIs by task

    " + "documentation":"

    Reserves open slots in a game session for a group of players. New player sessions can be created in any game session with an open slot that is in ACTIVE status and has a player creation policy of ACCEPT_ALL. To add a single player to a game session, use CreatePlayerSession.

    To create player sessions, specify a game session ID and a list of player IDs. Optionally, provide a set of player data for each player ID.

    If successful, a slot is reserved in the game session for each player, and new PlayerSession objects are returned with player session IDs. Each player references their player session ID when sending a connection request to the game session, and the game server can use it to validate the player reservation with the Amazon GameLift Servers service. Player sessions cannot be updated.

    The maximum number of players per game session is 200. It is not adjustable.

    Related actions

    All APIs by task

    " }, "CreateScript":{ "name":"CreateScript", @@ -327,7 +327,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Creates a new script record for your Amazon GameLift Realtime script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create an Amazon GameLift Realtime fleet to host your game sessions. Script logic is executed during an active game session.

    To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

    • A locally available directory. Use the ZipFile parameter for this option.

    • An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity and Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket.

    If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment.

    Learn more

    Amazon GameLift Realtime

    Set Up a Role for Amazon GameLift Access

    Related actions

    All APIs by task

    " + "documentation":"

    Creates a new script record for your Amazon GameLift Servers Realtime script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create an Amazon GameLift Servers Realtime fleet to host your game sessions. Script logic is executed during an active game session.

    To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations:

    • A locally available directory. Use the ZipFile parameter for this option.

    • An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity and Access Management (IAM) role that allows the Amazon GameLift Servers service to access your S3 bucket.

    If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift Servers-owned S3 bucket and the script record's storage location reflects this location. If the script file is provided as an S3 bucket, Amazon GameLift Servers accesses the file at this storage location as needed for deployment.

    Learn more

    Amazon GameLift Servers Realtime

    Set Up a Role for Amazon GameLift Servers Access

    Related actions

    All APIs by task

    " }, "CreateVpcPeeringAuthorization":{ "name":"CreateVpcPeeringAuthorization", @@ -343,7 +343,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in your Amazon Web Services account. VPC peering enables the game servers on your fleet to communicate directly with other Amazon Web Services resources. After you've received authorization, use CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Fleets.

    You can peer with VPCs that are owned by any Amazon Web Services account you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions.

    To request authorization to create a connection, call this operation from the Amazon Web Services account with the VPC that you want to peer to your Amazon GameLift fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the Amazon Web Services account that you use to manage Amazon GameLift. If successful, VPC peering is authorized for the specified VPC.

    To request authorization to delete a connection, call this operation from the Amazon Web Services account with the VPC that is peered with your Amazon GameLift fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the Amazon Web Services account that you use to manage Amazon GameLift.

    The authorization remains valid for 24 hours unless it is canceled. You must create or delete the peering connection while the authorization is valid.

    Related actions

    All APIs by task

    " + "documentation":"

    Requests authorization to create or delete a peer connection between the VPC for your Amazon GameLift Servers fleet and a virtual private cloud (VPC) in your Amazon Web Services account. VPC peering enables the game servers on your fleet to communicate directly with other Amazon Web Services resources. After you've received authorization, use CreateVpcPeeringConnection to establish the peering connection. For more information, see VPC Peering with Amazon GameLift Servers Fleets.

    You can peer with VPCs that are owned by any Amazon Web Services account you have access to, including the account that you use to manage your Amazon GameLift Servers fleets. You cannot peer with VPCs that are in different Regions.

    To request authorization to create a connection, call this operation from the Amazon Web Services account with the VPC that you want to peer to your Amazon GameLift Servers fleet. For example, to enable your game servers to retrieve data from a DynamoDB table, use the account that manages that DynamoDB resource. Identify the following values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the Amazon Web Services account that you use to manage Amazon GameLift Servers. If successful, VPC peering is authorized for the specified VPC.

    To request authorization to delete a connection, call this operation from the Amazon Web Services account with the VPC that is peered with your Amazon GameLift Servers fleet. Identify the following values: (1) VPC ID that you want to delete the peering connection for, and (2) ID of the Amazon Web Services account that you use to manage Amazon GameLift Servers.

    The authorization remains valid for 24 hours unless it is canceled. You must create or delete the peering connection while the authorization is valid.

    Related actions

    All APIs by task

    " }, "CreateVpcPeeringConnection":{ "name":"CreateVpcPeeringConnection", @@ -359,7 +359,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Establishes a VPC peering connection between a virtual private cloud (VPC) in an Amazon Web Services account with the VPC for your Amazon GameLift fleet. VPC peering enables the game servers on your fleet to communicate directly with other Amazon Web Services resources. You can peer with VPCs in any Amazon Web Services account that you have access to, including the account that you use to manage your Amazon GameLift fleets. You cannot peer with VPCs that are in different Regions. For more information, see VPC Peering with Amazon GameLift Fleets.

    Before calling this operation to establish the peering connection, you first need to use CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

    To establish the connection, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift fleets. Identify the following values: (1) The ID of the fleet you want to be enable a VPC peering connection for; (2) The Amazon Web Services account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a connection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections , or by monitoring fleet events for success or failure using DescribeFleetEvents .

    Related actions

    All APIs by task

    " + "documentation":"

    Establishes a VPC peering connection between a virtual private cloud (VPC) in an Amazon Web Services account with the VPC for your Amazon GameLift Servers fleet. VPC peering enables the game servers on your fleet to communicate directly with other Amazon Web Services resources. You can peer with VPCs in any Amazon Web Services account that you have access to, including the account that you use to manage your Amazon GameLift Servers fleets. You cannot peer with VPCs that are in different Regions. For more information, see VPC Peering with Amazon GameLift Servers Fleets.

    Before calling this operation to establish the peering connection, you first need to use CreateVpcPeeringAuthorization and identify the VPC you want to peer with. Once the authorization for the specified VPC is issued, you have 24 hours to establish the connection. These two operations handle all tasks necessary to peer the two VPCs, including acceptance, updating routing tables, etc.

    To establish the connection, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift Servers fleets. Identify the following values: (1) The ID of the fleet you want to enable a VPC peering connection for; (2) The Amazon Web Services account with the VPC that you want to peer with; and (3) The ID of the VPC you want to peer with. This operation is asynchronous. If successful, a connection request is created. You can use continuous polling to track the request's status using DescribeVpcPeeringConnections , or by monitoring fleet events for success or failure using DescribeFleetEvents .

    Related actions

    All APIs by task

    " }, "DeleteAlias":{ "name":"DeleteAlias", @@ -409,7 +409,7 @@ {"shape":"TaggingFailedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Deletes all resources and information related to a container fleet and shuts down currently running fleet instances, including those in remote locations. The container fleet must be in ACTIVE status to be deleted.

    To delete a fleet, specify the fleet ID to be terminated. During the deletion process, the fleet status is changed to DELETING.

    Learn more

    Setting up Amazon GameLift Fleets

    " + "documentation":"

    Deletes all resources and information related to a container fleet and shuts down currently running fleet instances, including those in remote locations. The container fleet must be in ACTIVE status to be deleted.

    To delete a fleet, specify the fleet ID to be terminated. During the deletion process, the fleet status is changed to DELETING.

    Learn more

    Setting up Amazon GameLift Servers Fleets

    " }, "DeleteContainerGroupDefinition":{ "name":"DeleteContainerGroupDefinition", @@ -427,7 +427,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Deletes a container group definition.

    Request options:

    • Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number.

    • Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number.

    • Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions.

    Result

    If successful, Amazon GameLift removes the container group definition versions that you request deletion for. This request will fail for any requested versions if the following is true:

    • If the version is being used in an active fleet

    • If the version is being deployed to a fleet in a deployment that's currently in progress.

    • If the version is designated as a rollback definition in a fleet deployment that's currently in progress.

    Learn more

    " + "documentation":"

    Deletes a container group definition.

    Request options:

    • Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number.

    • Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number.

    • Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions.

    Result

    If successful, Amazon GameLift Servers removes the container group definition versions that you request deletion for. This request will fail for any requested versions if the following is true:

    • If the version is being used in an active fleet

    • If the version is being deployed to a fleet in a deployment that's currently in progress.

    • If the version is designated as a rollback definition in a fleet deployment that's currently in progress.

    Learn more

    " }, "DeleteFleet":{ "name":"DeleteFleet", @@ -444,7 +444,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TaggingFailedException"} ], - "documentation":"

    Deletes all resources and information related to a fleet and shuts down any currently running fleet instances, including those in remote locations.

    If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You don't need to explicitly delete the VPC peering connection.

    To delete a fleet, specify the fleet ID to be terminated. During the deletion process, the fleet status is changed to DELETING. When completed, the status switches to TERMINATED and the fleet event FLEET_DELETED is emitted.

    Learn more

    Setting up Amazon GameLift Fleets

    " + "documentation":"

    Deletes all resources and information related to a fleet and shuts down any currently running fleet instances, including those in remote locations.

    If the fleet being deleted has a VPC peering connection, you first need to get a valid authorization (good for 24 hours) by calling CreateVpcPeeringAuthorization. You don't need to explicitly delete the VPC peering connection.

    To delete a fleet, specify the fleet ID to be terminated. During the deletion process, the fleet status is changed to DELETING. When completed, the status switches to TERMINATED and the fleet event FLEET_DELETED is emitted.

    Learn more

    Setting up Amazon GameLift Servers Fleets

    " }, "DeleteFleetLocations":{ "name":"DeleteFleetLocations", @@ -461,7 +461,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Removes locations from a multi-location fleet. When deleting a location, all game server process and all instances that are still active in the location are shut down.

    To delete fleet locations, identify the fleet ID and provide a list of the locations to be deleted.

    If successful, GameLift sets the location status to DELETING, and begins to shut down existing server processes and terminate instances in each location being deleted. When completed, the location status changes to TERMINATED.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Removes locations from a multi-location fleet. When deleting a location, all game server processes and all instances that are still active in the location are shut down.

    To delete fleet locations, identify the fleet ID and provide a list of the locations to be deleted.

    If successful, Amazon GameLift Servers sets the location status to DELETING, and begins to shut down existing server processes and terminate instances in each location being deleted. When completed, the location status changes to TERMINATED.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "DeleteGameServerGroup":{ "name":"DeleteGameServerGroup", @@ -477,7 +477,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:

    • The game server group

    • The corresponding Auto Scaling group

    • All game servers that are currently running in the group

    To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

    If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, Amazon GameLift FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR status.

    Amazon GameLift FleetIQ emits delete events to Amazon CloudWatch.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Terminates a game server group and permanently deletes the game server group record. You have several options for how these resources are impacted when deleting the game server group. Depending on the type of delete operation selected, this operation might affect these resources:

    • The game server group

    • The corresponding Auto Scaling group

    • All game servers that are currently running in the group

    To delete a game server group, identify the game server group to delete and specify the type of delete operation to initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR status.

    If the delete request is successful, a series of operations are kicked off. The game server group status is changed to DELETE_SCHEDULED, which prevents new game servers from being registered and stops automatic scaling activity. Once all game servers in the game server group are deregistered, Amazon GameLift Servers FleetIQ can begin deleting resources. If any of the delete operations fail, the game server group is placed in ERROR status.

    Amazon GameLift Servers FleetIQ emits delete events to Amazon CloudWatch.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "DeleteGameSessionQueue":{ "name":"DeleteGameSessionQueue", @@ -560,7 +560,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and Amazon GameLift removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

    To temporarily suspend scaling policies, use StopFleetActions. This operation suspends all policies for the fleet.

    " + "documentation":"

    Deletes a fleet scaling policy. Once deleted, the policy is no longer in force and Amazon GameLift Servers removes all record of it. To delete a scaling policy, specify both the scaling policy name and the fleet ID it is associated with.

    To temporarily suspend scaling policies, use StopFleetActions. This operation suspends all policies for the fleet.

    " }, "DeleteScript":{ "name":"DeleteScript", @@ -576,7 +576,7 @@ {"shape":"TaggingFailedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

    To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

    Learn more

    Amazon GameLift Amazon GameLift Realtime

    Related actions

    All APIs by task

    " + "documentation":"

    Deletes a Realtime script. This operation permanently deletes the script record. If script files were uploaded, they are also deleted (files stored in an S3 bucket are not deleted).

    To delete a script, specify the script ID. Before deleting a script, be sure to terminate all fleets that are deployed with the script being deleted. Fleet instances periodically check for script updates, and if the script record no longer exists, the instance will go into an error state and be unable to host game sessions.

    Learn more

    Amazon GameLift Servers Amazon GameLift Servers Realtime

    Related actions

    All APIs by task

    " }, "DeleteVpcPeeringAuthorization":{ "name":"DeleteVpcPeeringAuthorization", @@ -608,7 +608,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete..

    Once a valid authorization exists, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

    Related actions

    All APIs by task

    " + "documentation":"

    Removes a VPC peering connection. To delete the connection, you must have a valid authorization for the VPC peering connection that you want to delete.

    Once a valid authorization exists, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift Servers fleets. Identify the connection to delete by the connection ID and fleet ID. If successful, the connection is removed.

    Related actions

    All APIs by task

    " }, "DeregisterCompute":{ "name":"DeregisterCompute", @@ -624,7 +624,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Removes a compute resource from an Anywhere fleet. Deregistered computes can no longer host game sessions through Amazon GameLift. Use this operation with an Anywhere fleet that doesn't use the Amazon GameLift Agent For Anywhere fleets with the Agent, the Agent handles all compute registry tasks for you.

    To deregister a compute, call this operation from the compute that's being deregistered and specify the compute name and the fleet ID.

    " + "documentation":"

    Removes a compute resource from an Anywhere fleet. Deregistered computes can no longer host game sessions through Amazon GameLift Servers. Use this operation with an Anywhere fleet that doesn't use the Amazon GameLift Servers Agent. For Anywhere fleets with the Agent, the Agent handles all compute registry tasks for you.

    To deregister a compute, call this operation from the compute that's being deregistered and specify the compute name and the fleet ID.

    " }, "DeregisterGameServer":{ "name":"DeregisterGameServer", @@ -639,7 +639,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Removes the game server from a game server group. As a result of this operation, the deregistered game server can no longer be claimed and will not be returned in a list of active game servers.

    To deregister a game server, specify the game server group and game server ID. If successful, this operation emits a CloudWatch event with termination timestamp and reason.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Removes the game server from a game server group. As a result of this operation, the deregistered game server can no longer be claimed and will not be returned in a list of active game servers.

    To deregister a game server, specify the game server group and game server ID. If successful, this operation emits a CloudWatch event with termination timestamp and reason.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "DescribeAlias":{ "name":"DescribeAlias", @@ -688,7 +688,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves properties for a specific compute resource in an Amazon GameLift fleet. You can list all computes in a fleet by calling ListCompute.

    Request options

    Provide the fleet ID and compute name. The compute name varies depending on the type of fleet.

    • For a compute in a managed EC2 fleet, provide an instance ID. Each instance in the fleet is a compute.

    • For a compute in a managed container fleet, provide a compute name. In a container fleet, each game server container group on a fleet instance is assigned a compute name.

    • For a compute in an Anywhere fleet, provide a registered compute name. Anywhere fleet computes are created when you register a hosting resource with the fleet.

    Results

    If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information:

    • For a managed EC2 fleet, this operation returns information about the EC2 instance.

    • For an Anywhere fleet, this operation returns information about the registered compute.

    " + "documentation":"

    Retrieves properties for a specific compute resource in an Amazon GameLift Servers fleet. You can list all computes in a fleet by calling ListCompute.

    Request options

    Provide the fleet ID and compute name. The compute name varies depending on the type of fleet.

    • For a compute in a managed EC2 fleet, provide an instance ID. Each instance in the fleet is a compute.

    • For a compute in a managed container fleet, provide a compute name. In a container fleet, each game server container group on a fleet instance is assigned a compute name.

    • For a compute in an Anywhere fleet, provide a registered compute name. Anywhere fleet computes are created when you register a hosting resource with the fleet.

    Results

    If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information:

    • For a managed EC2 fleet, this operation returns information about the EC2 instance.

    • For an Anywhere fleet, this operation returns information about the registered compute.

    " }, "DescribeContainerFleet":{ "name":"DescribeContainerFleet", @@ -738,7 +738,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. Instance limits control the number of instances, per instance type, per location, that your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information returned includes the maximum number of instances allowed and your account's current usage across all fleets. This information can affect your ability to scale your Amazon GameLift fleets. You can request a limit increase for your account by using the Service limits page in the Amazon GameLift console.

    Instance limits differ based on whether the instances are deployed in a fleet's home Region or in a remote location. For remote locations, limits also differ based on the combination of home Region and remote location. All requests must specify an Amazon Web Services Region (either explicitly or as your default settings). To get the limit for a remote location, you must also specify the location. For example, the following requests all return different results:

    • Request specifies the Region ap-northeast-1 with no location. The result is limits and usage data on all instance types that are deployed in us-east-2, by all of the fleets that reside in ap-northeast-1.

    • Request specifies the Region us-east-1 with location ca-central-1. The result is limits and usage data on all instance types that are deployed in ca-central-1, by all of the fleets that reside in us-east-2. These limits do not affect fleets in any other Regions that deploy instances to ca-central-1.

    • Request specifies the Region eu-west-1 with location ca-central-1. The result is limits and usage data on all instance types that are deployed in ca-central-1, by all of the fleets that reside in eu-west-1.

    This operation can be used in the following ways:

    • To get limit and usage data for all instance types that are deployed in an Amazon Web Services Region by fleets that reside in the same Region: Specify the Region only. Optionally, specify a single instance type to retrieve information for.

    • To get limit and usage data for all instance types that are deployed to a remote location by fleets that reside in different Amazon Web Services Region: Provide both the Amazon Web Services Region and the remote location. Optionally, specify a single instance type to retrieve information for.

    If successful, an EC2InstanceLimits object is returned with limits and usage data for each requested instance type.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. Instance limits control the number of instances, per instance type, per location, that your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information returned includes the maximum number of instances allowed and your account's current usage across all fleets. This information can affect your ability to scale your Amazon GameLift Servers fleets. You can request a limit increase for your account by using the Service limits page in the Amazon GameLift Servers console.

    Instance limits differ based on whether the instances are deployed in a fleet's home Region or in a remote location. For remote locations, limits also differ based on the combination of home Region and remote location. All requests must specify an Amazon Web Services Region (either explicitly or as your default settings). To get the limit for a remote location, you must also specify the location. For example, the following requests all return different results:

    • Request specifies the Region ap-northeast-1 with no location. The result is limits and usage data on all instance types that are deployed in ap-northeast-1, by all of the fleets that reside in ap-northeast-1.

    • Request specifies the Region us-east-1 with location ca-central-1. The result is limits and usage data on all instance types that are deployed in ca-central-1, by all of the fleets that reside in us-east-1. These limits do not affect fleets in any other Regions that deploy instances to ca-central-1.

    • Request specifies the Region eu-west-1 with location ca-central-1. The result is limits and usage data on all instance types that are deployed in ca-central-1, by all of the fleets that reside in eu-west-1.

    This operation can be used in the following ways:

    • To get limit and usage data for all instance types that are deployed in an Amazon Web Services Region by fleets that reside in the same Region: Specify the Region only. Optionally, specify a single instance type to retrieve information for.

    • To get limit and usage data for all instance types that are deployed to a remote location by fleets that reside in different Amazon Web Services Region: Provide both the Amazon Web Services Region and the remote location. Optionally, specify a single instance type to retrieve information for.

    If successful, an EC2InstanceLimits object is returned with limits and usage data for each requested instance type.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "DescribeFleetAttributes":{ "name":"DescribeFleetAttributes", @@ -754,7 +754,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves core fleet-wide properties for fleets in an Amazon Web Services Region. Properties include the computing hardware and deployment configuration for instances in the fleet.

    You can use this operation in the following ways:

    • To get attributes for specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get attributes for all fleets, do not provide a fleet identifier.

    When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found.

    Some API operations limit the number of fleet IDs that allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Retrieves core fleet-wide properties for fleets in an Amazon Web Services Region. Properties include the computing hardware and deployment configuration for instances in the fleet.

    You can use this operation in the following ways:

    • To get attributes for specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get attributes for all fleets, do not provide a fleet identifier.

    When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found.

    Some API operations limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "DescribeFleetCapacity":{ "name":"DescribeFleetCapacity", @@ -771,7 +771,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups.

    With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html.

    This operation can be used in the following ways:

    • To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get capacity data for all fleets, do not provide a fleet identifier.

    When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist.

    Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    Learn more

    Setting up Amazon GameLift fleets

    GameLift metrics for fleets

    " + "documentation":"

    Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups.

    With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html.

    This operation can be used in the following ways:

    • To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get capacity data for all fleets, do not provide a fleet identifier.

    When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist.

    Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    Learn more

    Setting up Amazon GameLift Servers fleets

    GameLift metrics for fleets

    " }, "DescribeFleetDeployment":{ "name":"DescribeFleetDeployment", @@ -805,7 +805,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves entries from a fleet's event log. Fleet events are initiated by changes in status, such as during fleet creation and termination, changes in capacity, etc. If a fleet has multiple locations, events are also initiated by changes to status and capacity in remote locations.

    You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a collection of event log entries matching the request are returned.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Retrieves entries from a fleet's event log. Fleet events are initiated by changes in status, such as during fleet creation and termination, changes in capacity, etc. If a fleet has multiple locations, events are also initiated by changes to status and capacity in remote locations.

    You can specify a time range to limit the result set. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a collection of event log entries matching the request is returned.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "DescribeFleetLocationAttributes":{ "name":"DescribeFleetLocationAttributes", @@ -822,7 +822,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves information on a fleet's remote locations, including life-cycle status and any suspended fleet activity.

    This operation can be used in the following ways:

    • To get data for specific locations, provide a fleet identifier and a list of locations. Location data is returned in the order that it is requested.

    • To get data for all locations, provide a fleet identifier only. Location data is returned in no particular order.

    When requesting attributes for multiple locations, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a LocationAttributes object is returned for each requested location. If the fleet does not have a requested location, no information is returned. This operation does not return the home Region. To get information on a fleet's home Region, call DescribeFleetAttributes.

    Learn more

    Setting up Amazon GameLift fleets

    Amazon GameLift service locations for managed hosting

    " + "documentation":"

    Retrieves information on a fleet's remote locations, including life-cycle status and any suspended fleet activity.

    This operation can be used in the following ways:

    • To get data for specific locations, provide a fleet identifier and a list of locations. Location data is returned in the order that it is requested.

    • To get data for all locations, provide a fleet identifier only. Location data is returned in no particular order.

    When requesting attributes for multiple locations, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a LocationAttributes object is returned for each requested location. If the fleet does not have a requested location, no information is returned. This operation does not return the home Region. To get information on a fleet's home Region, call DescribeFleetAttributes.

    Learn more

    Setting up Amazon GameLift Servers fleets

    Amazon GameLift Servers service locations for managed hosting

    " }, "DescribeFleetLocationCapacity":{ "name":"DescribeFleetLocationCapacity", @@ -839,7 +839,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. For a managed container fleet, this operation also returns counts for game server container groups.

    Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity).

    To retrieve capacity data, identify a fleet and location.

    If successful, a FleetCapacity object is returned for the requested fleet location.

    Learn more

    Setting up Amazon GameLift fleets

    Amazon GameLift service locations for managed hosting

    GameLift metrics for fleets

    " + "documentation":"

    Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. For a managed container fleet, this operation also returns counts for game server container groups.

    Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity).

    To retrieve capacity data, identify a fleet and location.

    If successful, a FleetCapacity object is returned for the requested fleet location.

    Learn more

    Setting up Amazon GameLift Servers fleets

    Amazon GameLift Servers service locations for managed hosting

    GameLift metrics for fleets

    " }, "DescribeFleetLocationUtilization":{ "name":"DescribeFleetLocationUtilization", @@ -856,7 +856,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves current usage data for a fleet location. Utilization data provides a snapshot of current game hosting activity at the requested location. Use this operation to retrieve utilization information for a fleet's remote location or home Region (you can also retrieve home Region utilization by calling DescribeFleetUtilization).

    To retrieve utilization data, identify a fleet and location.

    If successful, a FleetUtilization object is returned for the requested fleet location.

    Learn more

    Setting up Amazon GameLift fleets

    Amazon GameLift service locations for managed hosting

    GameLift metrics for fleets

    " + "documentation":"

    Retrieves current usage data for a fleet location. Utilization data provides a snapshot of current game hosting activity at the requested location. Use this operation to retrieve utilization information for a fleet's remote location or home Region (you can also retrieve home Region utilization by calling DescribeFleetUtilization).

    To retrieve utilization data, identify a fleet and location.

    If successful, a FleetUtilization object is returned for the requested fleet location.

    Learn more

    Setting up Amazon GameLift Servers fleets

    Amazon GameLift Servers service locations for managed hosting

    GameLift metrics for fleets

    " }, "DescribeFleetPortSettings":{ "name":"DescribeFleetPortSettings", @@ -873,7 +873,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range.

    Use this operation in the following ways:

    • To retrieve the port settings for a fleet, identify the fleet's unique identifier.

    • To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations.

    If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range.

    Use this operation in the following ways:

    • To retrieve the port settings for a fleet, identify the fleet's unique identifier.

    • To check the status of recent updates to a fleet's remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations.

    If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "DescribeFleetUtilization":{ "name":"DescribeFleetUtilization", @@ -889,7 +889,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves utilization statistics for one or more fleets. Utilization data provides a snapshot of how the fleet's hosting resources are currently being used. For fleets with remote locations, this operation retrieves data for the fleet's home Region only. See DescribeFleetLocationUtilization to get utilization statistics for a fleet's remote locations.

    This operation can be used in the following ways:

    • To get utilization data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get utilization data for all fleets, do not provide a fleet identifier.

    When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetUtilization object is returned for each requested fleet ID, unless the fleet identifier is not found. Each fleet utilization object includes a Location property, which is set to the fleet's home Region.

    Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    Learn more

    Setting up Amazon GameLift Fleets

    GameLift Metrics for Fleets

    " + "documentation":"

    Retrieves utilization statistics for one or more fleets. Utilization data provides a snapshot of how the fleet's hosting resources are currently being used. For fleets with remote locations, this operation retrieves data for the fleet's home Region only. See DescribeFleetLocationUtilization to get utilization statistics for a fleet's remote locations.

    This operation can be used in the following ways:

    • To get utilization data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs.

    • To get utilization data for all fleets, do not provide a fleet identifier.

    When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a FleetUtilization object is returned for each requested fleet ID, unless the fleet identifier is not found. Each fleet utilization object includes a Location property, which is set to the fleet's home Region.

    Some API operations may limit the number of fleet IDs allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed.

    Learn more

    Setting up Amazon GameLift Servers Fleets

    GameLift Metrics for Fleets

    " }, "DescribeGameServer":{ "name":"DescribeGameServer", @@ -905,7 +905,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Retrieves information for a registered game server. Information includes game server status, health check info, and the instance that the game server is running on.

    To retrieve game server information, specify the game server ID. If successful, the requested game server object is returned.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Retrieves information for a registered game server. Information includes game server status, health check info, and the instance that the game server is running on.

    To retrieve game server information, specify the game server ID. If successful, the requested game server object is returned.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "DescribeGameServerGroup":{ "name":"DescribeGameServerGroup", @@ -921,7 +921,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Retrieves information on a game server group. This operation returns only properties related to Amazon GameLift FleetIQ. To view or update properties for the corresponding Auto Scaling group, such as launch template, auto scaling policies, and maximum/minimum group size, access the Auto Scaling group directly.

    To get attributes for a game server group, provide a group name or ARN value. If successful, a GameServerGroup object is returned.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Retrieves information on a game server group. This operation returns only properties related to Amazon GameLift Servers FleetIQ. To view or update properties for the corresponding Auto Scaling group, such as launch template, auto scaling policies, and maximum/minimum group size, access the Auto Scaling group directly.

    To get attributes for a game server group, provide a group name or ARN value. If successful, a GameServerGroup object is returned.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "DescribeGameServerInstances":{ "name":"DescribeGameServerInstances", @@ -937,7 +937,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Retrieves status information about the Amazon EC2 instances associated with a Amazon GameLift FleetIQ game server group. Use this operation to detect when instances are active or not available to host new game servers.

    To request status for all instances in the game server group, provide a game server group ID only. To request status for specific instances, provide the game server group ID and one or more instance IDs. Use the pagination parameters to retrieve results in sequential segments. If successful, a collection of GameServerInstance objects is returned.

    This operation is not designed to be called with every game server claim request; this practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, cache the results and refresh your cache no more than once every 10 seconds.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Retrieves status information about the Amazon EC2 instances associated with an Amazon GameLift Servers FleetIQ game server group. Use this operation to detect when instances are active or not available to host new game servers.

    To request status for all instances in the game server group, provide a game server group ID only. To request status for specific instances, provide the game server group ID and one or more instance IDs. Use the pagination parameters to retrieve results in sequential segments. If successful, a collection of GameServerInstance objects is returned.

    This operation is not designed to be called with every game server claim request; this practice can cause you to exceed your API limit, which results in errors. Instead, as a best practice, cache the results and refresh your cache no more than once every 10 seconds.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "DescribeGameSessionDetails":{ "name":"DescribeGameSessionDetails", @@ -1005,7 +1005,7 @@ {"shape":"TerminalRoutingStrategyException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves a set of one or more game sessions in a specific fleet location. You can optionally filter the results by current game session status.

    This operation can be used in the following ways:

    • To retrieve all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID, with an optional status filter. This approach returns all game sessions in the fleet's home Region and all remote locations.

    • To retrieve all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name, with optional status filter. The location can be the fleet's home Region or any remote location.

    • To retrieve a specific game session, provide the game session ID. This approach looks for the game session ID in all fleets that reside in the Amazon Web Services Region defined in the request.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a GameSession object is returned for each game session that matches the request.

    This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessions should only be used for games in development with low game session usage.

    Available in Amazon GameLift Local.

    Learn more

    Find a game session

    All APIs by task

    " + "documentation":"

    Retrieves a set of one or more game sessions in a specific fleet location. You can optionally filter the results by current game session status.

    This operation can be used in the following ways:

    • To retrieve all game sessions that are currently running on all locations in a fleet, provide a fleet or alias ID, with an optional status filter. This approach returns all game sessions in the fleet's home Region and all remote locations.

    • To retrieve all game sessions that are currently running on a specific fleet location, provide a fleet or alias ID and a location name, with optional status filter. The location can be the fleet's home Region or any remote location.

    • To retrieve a specific game session, provide the game session ID. This approach looks for the game session ID in all fleets that reside in the Amazon Web Services Region defined in the request.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a GameSession object is returned for each game session that matches the request.

    This operation is not designed to be continually called to track game session status. This practice can cause you to exceed your API limit, which results in errors. Instead, you must configure an Amazon Simple Notification Service (SNS) topic to receive notifications from FlexMatch or queues. Continuously polling with DescribeGameSessions should only be used for games in development with low game session usage.

    Available in Amazon GameLift Servers Local.

    Learn more

    Find a game session

    All APIs by task

    " }, "DescribeInstances":{ "name":"DescribeInstances", @@ -1022,7 +1022,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves information about the EC2 instances in an Amazon GameLift managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute and https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

    You can call this operation in the following ways:

    • To get information on all instances in a fleet's home Region, specify the fleet ID.

    • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.

    • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves information about the EC2 instances in an Amazon GameLift Servers managed fleet, including instance ID, connection data, and status. You can use this operation with a multi-location fleet to get location-specific instance information. As an alternative, use the operations https://docs.aws.amazon.com/gamelift/latest/apireference/API_ListCompute and https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeCompute to retrieve information for compute resources, including EC2 and Anywhere fleets.

    You can call this operation in the following ways:

    • To get information on all instances in a fleet's home Region, specify the fleet ID.

    • To get information on all instances in a fleet's remote location, specify the fleet ID and location name.

    • To get information on a specific instance in a fleet, specify the fleet ID and instance ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns Instance objects for each requested instance, listed in no particular order. If you call this operation for an Anywhere fleet, you receive an InvalidRequestException.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "DescribeMatchmaking":{ "name":"DescribeMatchmaking", @@ -1084,7 +1084,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. If you provide a specific PlayerSessionId or PlayerId, Amazon GameLift ignores the filter criteria. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves properties for one or more player sessions.

    This action can be used in the following ways:

    • To retrieve a specific player session, provide the player session ID only.

    • To retrieve all player sessions in a game session, provide the game session ID only.

    • To retrieve all player sessions for a specific player, provide a player ID only.

    To request player sessions, specify either a player session ID, game session ID, or player ID. You can filter this request by player session status. If you provide a specific PlayerSessionId or PlayerId, Amazon GameLift Servers ignores the filter criteria. Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, a PlayerSession object is returned for each session that matches the request.

    Related actions

    All APIs by task

    " }, "DescribeRuntimeConfiguration":{ "name":"DescribeRuntimeConfiguration", @@ -1100,7 +1100,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration.

    To get the current runtime configuration for a fleet, provide the fleet ID.

    If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

    Learn more

    Setting up Amazon GameLift fleets

    Running multiple processes on a fleet

    " + "documentation":"

    Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration.

    To get the current runtime configuration for a fleet, provide the fleet ID.

    If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty.

    Learn more

    Setting up Amazon GameLift Servers fleets

    Running multiple processes on a fleet

    " }, "DescribeScalingPolicies":{ "name":"DescribeScalingPolicies", @@ -1133,7 +1133,7 @@ {"shape":"InternalServiceException"}, {"shape":"NotFoundException"} ], - "documentation":"

    Retrieves properties for a Realtime script.

    To request a script record, specify the script ID. If successful, an object containing the script properties is returned.

    Learn more

    Amazon GameLift Amazon GameLift Realtime

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves properties for a Realtime script.

    To request a script record, specify the script ID. If successful, an object containing the script properties is returned.

    Learn more

    Amazon GameLift Servers Realtime

    Related actions

    All APIs by task

    " }, "DescribeVpcPeeringAuthorizations":{ "name":"DescribeVpcPeeringAuthorizations", @@ -1164,7 +1164,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

    To retrieve connection information, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IpV4 CIDR block that the VPC uses to connect.

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves information on VPC peering connections. Use this operation to get peering information for all fleets or for one specific fleet ID.

    To retrieve connection information, call this operation from the Amazon Web Services account that is used to manage the Amazon GameLift Servers fleets. Specify a fleet ID or leave the parameter empty to retrieve all connection records. If successful, the retrieved information includes both active and pending connections. Active connections identify the IpV4 CIDR block that the VPC uses to connect.

    Related actions

    All APIs by task

    " }, "GetComputeAccess":{ "name":"GetComputeAccess", @@ -1181,7 +1181,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed fleet. This operation is not used with Amazon GameLift Anywhere fleets.

    Request options

    Provide the fleet ID and compute name. The compute name varies depending on the type of fleet.

    • For a compute in a managed EC2 fleet, provide an instance ID. Each instance in the fleet is a compute.

    • For a compute in a managed container fleet, provide a compute name. In a container fleet, each game server container group on a fleet instance is assigned a compute name.

    Results

    If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token.

    • With a managed EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide.

    " + "documentation":"

    Requests authorization to remotely connect to a hosting resource in an Amazon GameLift Servers managed fleet. This operation is not used with Amazon GameLift Servers Anywhere fleets.

    Request options

    Provide the fleet ID and compute name. The compute name varies depending on the type of fleet.

    • For a compute in a managed EC2 fleet, provide an instance ID. Each instance in the fleet is a compute.

    • For a compute in a managed container fleet, provide a compute name. In a container fleet, each game server container group on a fleet instance is assigned a compute name.

    Results

    If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token.

    • With a managed EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide.

    " }, "GetComputeAuthToken":{ "name":"GetComputeAuthToken", @@ -1198,7 +1198,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Requests an authentication token from Amazon GameLift for a compute resource in an Amazon GameLift fleet. Game servers that are running on the compute use this token to communicate with the Amazon GameLift service, such as when calling the Amazon GameLift server SDK action InitSDK(). Authentication tokens are valid for a limited time span, so you need to request a fresh token before the current token expires.

    Request options

    • For managed EC2 fleets (compute type EC2), auth token retrieval and refresh is handled automatically. All game servers that are running on all fleet instances have access to a valid auth token.

    • For Anywhere fleets (compute type ANYWHERE), if you're using the Amazon GameLift Agent, auth token retrieval and refresh is handled automatically for any compute where the Agent is running. If you're not using the Agent, create a mechanism to retrieve and refresh auth tokens for computes that are running game server processes.

    Learn more

    " + "documentation":"

    Requests an authentication token from Amazon GameLift Servers for a compute resource in an Amazon GameLift Servers fleet. Game servers that are running on the compute use this token to communicate with the Amazon GameLift Servers service, such as when calling the Amazon GameLift Servers server SDK action InitSDK(). Authentication tokens are valid for a limited time span, so you need to request a fresh token before the current token expires.

    Request options

    • For managed EC2 fleets (compute type EC2), auth token retrieval and refresh is handled automatically. All game servers that are running on all fleet instances have access to a valid auth token.

    • For Anywhere fleets (compute type ANYWHERE), if you're using the Amazon GameLift Servers Agent, auth token retrieval and refresh is handled automatically for any compute where the Agent is running. If you're not using the Agent, create a mechanism to retrieve and refresh auth tokens for computes that are running game server processes.

    Learn more

    " }, "GetGameSessionLogUrl":{ "name":"GetGameSessionLogUrl", @@ -1214,7 +1214,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

    Retrieves the location of stored game session logs for a specified game session on Amazon GameLift managed fleets. When a game session is terminated, Amazon GameLift automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

    See the Amazon Web Services Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

    All APIs by task

    " + "documentation":"

    Retrieves the location of stored game session logs for a specified game session on Amazon GameLift Servers managed fleets. When a game session is terminated, Amazon GameLift Servers automatically stores the logs in Amazon S3 and retains them for 14 days. Use this URL to download the logs.

    See the Amazon Web Services Service Limits page for maximum log file sizes. Log files that exceed this limit are not saved.

    All APIs by task

    " }, "GetInstanceAccess":{ "name":"GetInstanceAccess", @@ -1230,7 +1230,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Requests authorization to remotely connect to an instance in an Amazon GameLift managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetComputeAccess.

    To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

    If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

    • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.

    • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " + "documentation":"

    Requests authorization to remotely connect to an instance in an Amazon GameLift Servers managed fleet. Use this operation to connect to instances with game servers that use Amazon GameLift Servers server SDK 4.x or earlier. To connect to instances with game servers that use server SDK 5.x or later, call https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetComputeAccess.

    To request access to an instance, specify IDs for the instance and the fleet it belongs to. You can retrieve instance IDs for a fleet by calling DescribeInstances with the fleet ID.

    If successful, this operation returns an IP address and credentials. The returned credentials match the operating system of the instance, as follows:

    • For a Windows instance: returns a user name and secret (password) for use with a Windows Remote Desktop client.

    • For a Linux instance: returns a user name and secret (RSA private key) for use with an SSH client. You must save the secret to a .pem file. If you're using the CLI, see the example Get credentials for a Linux instance for tips on automatically saving the secret to a .pem file.

    Learn more

    Remotely connect to fleet instances

    Debug fleet issues

    Related actions

    All APIs by task

    " }, "ListAliases":{ "name":"ListAliases", @@ -1276,7 +1276,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves information on the compute resources in an Amazon GameLift fleet. Use the pagination parameters to retrieve results in a set of sequential pages.

    Request options

    • Retrieve a list of all computes in a fleet. Specify a fleet ID.

    • Retrieve a list of all computes in a specific fleet location. Specify a fleet ID and location.

    Results

    If successful, this operation returns information on a set of computes. Depending on the type of fleet, the result includes the following information:

    • For a managed EC2 fleet (compute type EC2), this operation returns information about the EC2 instance. Compute names are EC2 instance IDs.

    • For an Anywhere fleet (compute type ANYWHERE), this operation returns compute names and details from when the compute was registered with RegisterCompute. This includes GameLiftServiceSdkEndpoint or GameLiftAgentEndpoint.

    " + "documentation":"

    Retrieves information on the compute resources in an Amazon GameLift Servers fleet. Use the pagination parameters to retrieve results in a set of sequential pages.

    Request options

    • Retrieve a list of all computes in a fleet. Specify a fleet ID.

    • Retrieve a list of all computes in a specific fleet location. Specify a fleet ID and location.

    Results

    If successful, this operation returns information on a set of computes. Depending on the type of fleet, the result includes the following information:

    • For a managed EC2 fleet (compute type EC2), this operation returns information about the EC2 instance. Compute names are EC2 instance IDs.

    • For an Anywhere fleet (compute type ANYWHERE), this operation returns compute names and details from when the compute was registered with RegisterCompute. This includes GameLiftServiceSdkEndpoint or GameLiftAgentEndpoint.

    " }, "ListContainerFleets":{ "name":"ListContainerFleets", @@ -1292,7 +1292,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves a collection of container fleet resources in an Amazon Web Services Region. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only.

    Request options

    • Get a list of all fleets. Call this operation without specifying a container group definition.

    • Get a list of fleets filtered by container group definition. Provide the container group definition name or ARN value.

    • To get a list of all Amazon GameLift Realtime fleets with a specific configuration script, provide the script ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns a collection of container fleets that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

    Fleet IDs are returned in no particular order.

    " + "documentation":"

    Retrieves a collection of container fleet resources in an Amazon Web Services Region. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only.

    Request options

    • Get a list of all fleets. Call this operation without specifying a container group definition.

    • Get a list of fleets filtered by container group definition. Provide the container group definition name or ARN value.

    • To get a list of all Amazon GameLift Servers Realtime fleets with a specific configuration script, provide the script ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns a collection of container fleets that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

    Fleet IDs are returned in no particular order.

    " }, "ListContainerGroupDefinitionVersions":{ "name":"ListContainerGroupDefinitionVersions", @@ -1358,7 +1358,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Retrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the result set to find only those fleets that are deployed with a specific build or script. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only.

    You can use operation in the following ways:

    • To get a list of all fleets in a Region, don't provide a build or script identifier.

    • To get a list of all fleets where a specific game build is deployed, provide the build ID.

    • To get a list of all Amazon GameLift Realtime fleets with a specific configuration script, provide the script ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns a list of fleet IDs that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

    Fleet IDs are returned in no particular order.

    " + "documentation":"

    Retrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the result set to find only those fleets that are deployed with a specific build or script. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only.

    You can use this operation in the following ways:

    • To get a list of all fleets in a Region, don't provide a build or script identifier.

    • To get a list of all fleets where a specific game build is deployed, provide the build ID.

    • To get a list of all Amazon GameLift Servers Realtime fleets with a specific configuration script, provide the script ID.

    Use the pagination parameters to retrieve results as a set of sequential pages.

    If successful, this operation returns a list of fleet IDs that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve.

    Fleet IDs are returned in no particular order.

    " }, "ListGameServerGroups":{ "name":"ListGameServerGroups", @@ -1388,7 +1388,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Retrieves information on all game servers that are currently active in a specified game server group. You can opt to sort the list by game server age. Use the pagination parameters to retrieve results in a set of sequential segments.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Retrieves information on all game servers that are currently active in a specified game server group. You can opt to sort the list by game server age. Use the pagination parameters to retrieve results in a set of sequential segments.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "ListLocations":{ "name":"ListLocations", @@ -1403,7 +1403,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Lists all custom and Amazon Web Services locations.

    " + "documentation":"

    Lists all custom and Amazon Web Services locations where Amazon GameLift Servers can host game servers.

    Note that if you call this API using a location that doesn't have a service endpoint, such as one that can only be a remote location in a multi-location fleet, the API returns an error.

    Consult the table of supported locations in Amazon GameLift Servers service locations to identify home Regions that support single and multi-location fleets.

    Learn more

    Service locations

    " }, "ListScripts":{ "name":"ListScripts", @@ -1418,7 +1418,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves script records for all Realtime scripts that are associated with the Amazon Web Services account in use.

    Learn more

    Amazon GameLift Amazon GameLift Realtime

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves script records for all Realtime scripts that are associated with the Amazon Web Services account in use.

    Learn more

    Amazon GameLift Servers Realtime

    Related actions

    All APIs by task

    " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1435,7 +1435,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Retrieves all tags assigned to a Amazon GameLift resource. Use resource tags to organize Amazon Web Services resources for a range of purposes. This operation handles the permissions necessary to manage tags for Amazon GameLift resources that support tagging.

    To list tags for a resource, specify the unique ARN value for the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " + "documentation":"

    Retrieves all tags assigned to an Amazon GameLift Servers resource. Use resource tags to organize Amazon Web Services resources for a range of purposes. This operation handles the permissions necessary to manage tags for Amazon GameLift Servers resources that support tagging.

    To list tags for a resource, specify the unique ARN value for the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " }, "PutScalingPolicy":{ "name":"PutScalingPolicy", @@ -1452,7 +1452,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

    Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

    Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

    Target-based policy

    A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift to take whatever action is needed to maintain that target.

    For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift will start new instances or stop unused instances in order to return to the 10% buffer.

    To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

    Rule-based policy

    A rule-based policy tracks specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

    For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

    A policy's rule statement has the following structure:

    If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

    To implement the example, the rule statement would look like this:

    If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

    To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

    " + "documentation":"

    Creates or updates a scaling policy for a fleet. Scaling policies are used to automatically scale a fleet's hosting capacity to meet player demand. An active scaling policy instructs Amazon GameLift Servers to track a fleet metric and automatically change the fleet's capacity when a certain threshold is reached. There are two types of scaling policies: target-based and rule-based. Use a target-based policy to quickly and efficiently manage fleet scaling; this option is the most commonly used. Use rule-based policies when you need to exert fine-grained control over auto-scaling.

    Fleets can have multiple scaling policies of each type in force at the same time; you can have one target-based policy, one or multiple rule-based scaling policies, or both. We recommend caution, however, because multiple auto-scaling policies can have unintended consequences.

    Learn more about how to work with auto-scaling in Set Up Fleet Automatic Scaling.

    Target-based policy

    A target-based policy tracks a single metric: PercentAvailableGameSessions. This metric tells us how much of a fleet's hosting capacity is ready to host game sessions but is not currently in use. This is the fleet's buffer; it measures the additional player demand that the fleet could handle at current capacity. With a target-based policy, you set your ideal buffer size and leave it to Amazon GameLift Servers to take whatever action is needed to maintain that target.

    For example, you might choose to maintain a 10% buffer for a fleet that has the capacity to host 100 simultaneous game sessions. This policy tells Amazon GameLift Servers to take action whenever the fleet's available capacity falls below or rises above 10 game sessions. Amazon GameLift Servers will start new instances or stop unused instances in order to return to the 10% buffer.

    To create or update a target-based policy, specify a fleet ID and name, and set the policy type to \"TargetBased\". Specify the metric to track (PercentAvailableGameSessions) and reference a TargetConfiguration object with your desired buffer value. Exclude all other parameters. On a successful request, the policy name is returned. The scaling policy is automatically in force as soon as it's successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

    Rule-based policy

    A rule-based policy tracks a specified fleet metric, sets a threshold value, and specifies the type of action to initiate when triggered. With a rule-based policy, you can select from several available fleet metrics. Each policy specifies whether to scale up or scale down (and by how much), so you need one policy for each type of action.

    For example, a policy may make the following statement: \"If the percentage of idle instances is greater than 20% for more than 15 minutes, then reduce the fleet capacity by 10%.\"

    A policy's rule statement has the following structure:

    If [MetricName] is [ComparisonOperator] [Threshold] for [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by [ScalingAdjustment].

    To implement the example, the rule statement would look like this:

    If [PercentIdleInstances] is [GreaterThanThreshold] [20] for [15] minutes, then [PercentChangeInCapacity] to/by [10].

    To create or update a scaling policy, specify a unique combination of name and fleet ID, and set the policy type to \"RuleBased\". Specify the parameter values for a policy rule statement. On a successful request, the policy name is returned. Scaling policies are automatically in force as soon as they're successfully created. If the fleet's auto-scaling actions are temporarily suspended, the new policy will be in force once the fleet actions are restarted.

    " }, "RegisterCompute":{ "name":"RegisterCompute", @@ -1470,7 +1470,7 @@ {"shape":"NotReadyException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Registers a compute resource in an Amazon GameLift Anywhere fleet.

    For an Anywhere fleet that's running the Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to register fleet computes.

    To register a compute, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide a fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource.

    If successful, this operation returns compute details, including an Amazon GameLift SDK endpoint or Agent endpoint. Game server processes running on the compute can use this endpoint to communicate with the Amazon GameLift service. Each server process includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK().

    To view compute details, call DescribeCompute with the compute name.

    Learn more

    " + "documentation":"

    Registers a compute resource in an Amazon GameLift Servers Anywhere fleet.

    For an Anywhere fleet that's running the Amazon GameLift Servers Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to register fleet computes.

    To register a compute, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide a fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource.

    If successful, this operation returns compute details, including an Amazon GameLift Servers SDK endpoint or Agent endpoint. Game server processes running on the compute can use this endpoint to communicate with the Amazon GameLift Servers service. Each server process includes the SDK endpoint in its call to the Amazon GameLift Servers server SDK action InitSDK().

    To view compute details, call DescribeCompute with the compute name.

    Learn more

    " }, "RegisterGameServer":{ "name":"RegisterGameServer", @@ -1487,7 +1487,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Creates a new game server resource and notifies Amazon GameLift FleetIQ that the game server is ready to host gameplay and players. This operation is called by a game server process that is running on an instance in a game server group. Registering game servers enables Amazon GameLift FleetIQ to track available game servers and enables game clients and services to claim a game server for a new game session.

    To register a game server, identify the game server group and instance where the game server is running, and provide a unique identifier for the game server. You can also include connection and game server data.

    Once a game server is successfully registered, it is put in status AVAILABLE. A request to register a game server may fail if the instance it is running on is in the process of shutting down as part of instance balancing or scale-down activity.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Creates a new game server resource and notifies Amazon GameLift Servers FleetIQ that the game server is ready to host gameplay and players. This operation is called by a game server process that is running on an instance in a game server group. Registering game servers enables Amazon GameLift Servers FleetIQ to track available game servers and enables game clients and services to claim a game server for a new game session.

    To register a game server, identify the game server group and instance where the game server is running, and provide a unique identifier for the game server. You can also include connection and game server data.

    Once a game server is successfully registered, it is put in status AVAILABLE. A request to register a game server may fail if the instance it is running on is in the process of shutting down as part of instance balancing or scale-down activity.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "RequestUploadCredentials":{ "name":"RequestUploadCredentials", @@ -1503,7 +1503,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift's Amazon S3. This is done as part of the build creation process; see CreateBuild.

    To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

    Learn more

    Create a Build with Files in S3

    All APIs by task

    " + "documentation":"

    Retrieves a fresh set of credentials for use when uploading a new set of game build files to Amazon GameLift Servers' Amazon S3. This is done as part of the build creation process; see CreateBuild.

    To request new credentials, specify the build ID as returned with an initial CreateBuild request. If successful, a new set of credentials are returned, along with the S3 storage location associated with the build ID.

    Learn more

    Create a Build with Files in S3

    All APIs by task

    " }, "ResolveAlias":{ "name":"ResolveAlias", @@ -1520,7 +1520,7 @@ {"shape":"TerminalRoutingStrategyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Attempts to retrieve a fleet ID that is associated with an alias. Specify a unique alias identifier.

    If the alias has a SIMPLE routing strategy, Amazon GameLift returns a fleet ID. If the alias has a TERMINAL routing strategy, the result is a TerminalRoutingStrategyException.

    Related actions

    All APIs by task

    " + "documentation":"

    Attempts to retrieve a fleet ID that is associated with an alias. Specify a unique alias identifier.

    If the alias has a SIMPLE routing strategy, Amazon GameLift Servers returns a fleet ID. If the alias has a TERMINAL routing strategy, the result is a TerminalRoutingStrategyException.

    Related actions

    All APIs by task

    " }, "ResumeGameServerGroup":{ "name":"ResumeGameServerGroup", @@ -1536,7 +1536,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Reinstates activity on a game server group after it has been suspended. A game server group might be suspended by the SuspendGameServerGroup operation, or it might be suspended involuntarily due to a configuration problem. In the second case, you can manually resume activity on the group once the configuration problem has been resolved. Refer to the game server group status and status reason for more information on why group activity is suspended.

    To resume activity, specify a game server group ARN and the type of activity to be resumed. If successful, a GameServerGroup object is returned showing that the resumed activity is no longer listed in SuspendedActions.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Reinstates activity on a game server group after it has been suspended. A game server group might be suspended by the SuspendGameServerGroup operation, or it might be suspended involuntarily due to a configuration problem. In the second case, you can manually resume activity on the group once the configuration problem has been resolved. Refer to the game server group status and status reason for more information on why group activity is suspended.

    To resume activity, specify a game server group ARN and the type of activity to be resumed. If successful, a GameServerGroup object is returned showing that the resumed activity is no longer listed in SuspendedActions.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "SearchGameSessions":{ "name":"SearchGameSessions", @@ -1571,7 +1571,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Resumes certain types of activity on fleet instances that were suspended with StopFleetActions. For multi-location fleets, fleet actions are managed separately for each location. Currently, this operation is used to restart a fleet's auto-scaling activity.

    This operation can be used in the following ways:

    • To restart actions on instances in the fleet's home Region, provide a fleet ID and the type of actions to resume.

    • To restart actions on instances in one of the fleet's remote locations, provide a fleet ID, a location name, and the type of actions to resume.

    If successful, Amazon GameLift once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet location were never stopped, this operation will have no effect.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Resumes certain types of activity on fleet instances that were suspended with StopFleetActions. For multi-location fleets, fleet actions are managed separately for each location. Currently, this operation is used to restart a fleet's auto-scaling activity.

    This operation can be used in the following ways:

    • To restart actions on instances in the fleet's home Region, provide a fleet ID and the type of actions to resume.

    • To restart actions on instances in one of the fleet's remote locations, provide a fleet ID, a location name, and the type of actions to resume.

    If successful, Amazon GameLift Servers once again initiates scaling events as triggered by the fleet's scaling policies. If actions on the fleet location were never stopped, this operation will have no effect.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "StartGameSessionPlacement":{ "name":"StartGameSessionPlacement", @@ -1588,7 +1588,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Makes a request to start a new game session using a game session queue. When processing a placement request, Amazon GameLift looks for the best possible available resource to host the game session, based on how the queue is configured to prioritize factors such as resource cost, latency, and location. After selecting an available resource, Amazon GameLift prompts the resource to start a game session. A placement request can include a list of players to create a set of player sessions. The request can also include information to pass to the new game session, such as to specify a game map or other options.

    Request options

    Use this operation to make the following types of requests.

    • Request a placement using the queue's default prioritization process (see the default prioritization described in PriorityConfiguration). Include these required parameters:

      • GameSessionQueueName

      • MaximumPlayerSessionCount

      • PlacementID

    • Request a placement and prioritize based on latency. Include these parameters:

      • Required parameters GameSessionQueueName, MaximumPlayerSessionCount, PlacementID.

      • PlayerLatencies. Include a set of latency values for destinations in the queue. When a request includes latency data, Amazon GameLift automatically reorder the queue's locations priority list based on lowest available latency values. If a request includes latency data for multiple players, Amazon GameLift calculates each location's average latency for all players and reorders to find the lowest latency across all players.

      • Don't include PriorityConfigurationOverride.

      • Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), you can optionally use the PriorityConfigurationOverride parameter to substitute a different location priority list for this placement request. Amazon GameLift searches each location on the priority override list to find an available hosting resource for the new game session. Specify a fallback strategy to use in the event that Amazon GameLift fails to place the game session in any of the locations on the override list.

    • Request a placement and prioritized based on a custom list of locations.

    • You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session.

    Result

    If successful, this operation generates a new game session placement request and adds it to the game session queue for processing. You can track the status of individual placement requests by calling DescribeGameSessionPlacement or by monitoring queue notifications. When the request status is FULFILLED, a new game session has started and the placement request is updated with connection information for the game session (IP address and port). If the request included player session data, Amazon GameLift creates a player session for each player ID in the request.

    The request results in a InvalidRequestException in the following situations:

    • If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters.

    • If the request includes the PriorityConfigurationOverride parameter and specifies a queue that doesn't prioritize locations.

    Amazon GameLift continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue.

    " + "documentation":"

    Makes a request to start a new game session using a game session queue. When processing a placement request, Amazon GameLift Servers looks for the best possible available resource to host the game session, based on how the queue is configured to prioritize factors such as resource cost, latency, and location. After selecting an available resource, Amazon GameLift Servers prompts the resource to start a game session. A placement request can include a list of players to create a set of player sessions. The request can also include information to pass to the new game session, such as to specify a game map or other options.

    Request options

    Use this operation to make the following types of requests.

    • Request a placement using the queue's default prioritization process (see the default prioritization described in PriorityConfiguration). Include these required parameters:

      • GameSessionQueueName

      • MaximumPlayerSessionCount

      • PlacementID

    • Request a placement and prioritize based on latency. Include these parameters:

      • Required parameters GameSessionQueueName, MaximumPlayerSessionCount, PlacementID.

      • PlayerLatencies. Include a set of latency values for destinations in the queue. When a request includes latency data, Amazon GameLift Servers automatically reorders the queue's locations priority list based on lowest available latency values. If a request includes latency data for multiple players, Amazon GameLift Servers calculates each location's average latency for all players and reorders to find the lowest latency across all players.

      • Don't include PriorityConfigurationOverride.

      • Prioritize based on a custom list of locations. If you're using a queue that's configured to prioritize location first (see PriorityConfiguration for game session queues), you can optionally use the PriorityConfigurationOverride parameter to substitute a different location priority list for this placement request. Amazon GameLift Servers searches each location on the priority override list to find an available hosting resource for the new game session. Specify a fallback strategy to use in the event that Amazon GameLift Servers fails to place the game session in any of the locations on the override list.

    • Request a placement and prioritize based on a custom list of locations.

    • You can request new player sessions for a group of players. Include the DesiredPlayerSessions parameter and include at minimum a unique player ID for each. You can also include player-specific data to pass to the new game session.

    Result

    If successful, this operation generates a new game session placement request and adds it to the game session queue for processing. You can track the status of individual placement requests by calling DescribeGameSessionPlacement or by monitoring queue notifications. When the request status is FULFILLED, a new game session has started and the placement request is updated with connection information for the game session (IP address and port). If the request included player session data, Amazon GameLift Servers creates a player session for each player ID in the request.

    The request results in an InvalidRequestException in the following situations:

    • If the request includes both PlayerLatencies and PriorityConfigurationOverride parameters.

    • If the request includes the PriorityConfigurationOverride parameter and specifies a queue that doesn't prioritize locations.

    Amazon GameLift Servers continues to retry each placement request until it reaches the queue's timeout setting. If a request times out, you can resubmit the request to the same queue or try a different queue.

    " }, "StartMatchBackfill":{ "name":"StartMatchBackfill", @@ -1604,7 +1604,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Finds new players to fill open slots in currently running game sessions. The backfill match process is essentially identical to the process of forming new matches. Backfill requests use the same matchmaker that was used to make the original match, and they provide matchmaking data for all players currently in the game session. FlexMatch uses this information to select new players so that backfilled match continues to meet the original match requirements.

    When using FlexMatch with Amazon GameLift managed hosting, you can request a backfill match from a client service by calling this operation with a GameSessions ID. You also have the option of making backfill requests directly from your game server. In response to a request, FlexMatch creates player sessions for the new players, updates the GameSession resource, and sends updated matchmaking data to the game server. You can request a backfill match at any point after a game session is started. Each game session can have only one active backfill request at a time; a subsequent request automatically replaces the earlier request.

    When using FlexMatch as a standalone component, request a backfill match by calling this operation without a game session identifier. As with newly formed matches, matchmaking results are returned in a matchmaking event so that your game can update the game session that is being backfilled.

    To request a backfill match, specify a unique ticket ID, the original matchmaking configuration, and matchmaking data for all current players in the game session being backfilled. Optionally, specify the GameSession ARN. If successful, a match backfill ticket is created and returned with status set to QUEUED. Track the status of backfill tickets using the same method for tracking tickets for new matches.

    Only game sessions created by FlexMatch are supported for match backfill.

    Learn more

    Backfill existing games with FlexMatch

    Matchmaking events (reference)

    How Amazon GameLift FlexMatch works

    " + "documentation":"

    Finds new players to fill open slots in currently running game sessions. The backfill match process is essentially identical to the process of forming new matches. Backfill requests use the same matchmaker that was used to make the original match, and they provide matchmaking data for all players currently in the game session. FlexMatch uses this information to select new players so that the backfilled match continues to meet the original match requirements.

    When using FlexMatch with Amazon GameLift Servers managed hosting, you can request a backfill match from a client service by calling this operation with a GameSessions ID. You also have the option of making backfill requests directly from your game server. In response to a request, FlexMatch creates player sessions for the new players, updates the GameSession resource, and sends updated matchmaking data to the game server. You can request a backfill match at any point after a game session is started. Each game session can have only one active backfill request at a time; a subsequent request automatically replaces the earlier request.

    When using FlexMatch as a standalone component, request a backfill match by calling this operation without a game session identifier. As with newly formed matches, matchmaking results are returned in a matchmaking event so that your game can update the game session that is being backfilled.

    To request a backfill match, specify a unique ticket ID, the original matchmaking configuration, and matchmaking data for all current players in the game session being backfilled. Optionally, specify the GameSession ARN. If successful, a match backfill ticket is created and returned with status set to QUEUED. Track the status of backfill tickets using the same method for tracking tickets for new matches.

    Only game sessions created by FlexMatch are supported for match backfill.

    Learn more

    Backfill existing games with FlexMatch

    Matchmaking events (reference)

    How Amazon GameLift Servers FlexMatch works

    " }, "StartMatchmaking":{ "name":"StartMatchmaking", @@ -1620,7 +1620,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. With games that use Amazon GameLift managed hosting, this operation also triggers Amazon GameLift to find hosting resources and start a new game session for the new match. Each matchmaking request includes information on one or more players and specifies the FlexMatch matchmaker to use. When a request is for multiple players, FlexMatch attempts to build a match that includes all players in the request, placing them in the same team and finding additional players as needed to fill the match.

    To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include any player attributes that are required by the matchmaking configuration's rule set. If successful, a matchmaking ticket is returned with status set to QUEUED.

    Track matchmaking events to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service, which is defined in the matchmaking configuration.

    Learn more

    Add FlexMatch to a game client

    Set Up FlexMatch event notification

    How Amazon GameLift FlexMatch works

    " + "documentation":"

    Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules. With games that use Amazon GameLift Servers managed hosting, this operation also triggers Amazon GameLift Servers to find hosting resources and start a new game session for the new match. Each matchmaking request includes information on one or more players and specifies the FlexMatch matchmaker to use. When a request is for multiple players, FlexMatch attempts to build a match that includes all players in the request, placing them in the same team and finding additional players as needed to fill the match.

    To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include any player attributes that are required by the matchmaking configuration's rule set. If successful, a matchmaking ticket is returned with status set to QUEUED.

    Track matchmaking events to respond as needed and acquire game session connection information for successfully completed matches. Ticket status updates are tracked using event notification through Amazon Simple Notification Service, which is defined in the matchmaking configuration.

    Learn more

    Add FlexMatch to a game client

    Set Up FlexMatch event notification

    How Amazon GameLift Servers FlexMatch works

    " }, "StopFleetActions":{ "name":"StopFleetActions", @@ -1637,7 +1637,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Suspends certain types of activity in a fleet location. Currently, this operation is used to stop auto-scaling activity. For multi-location fleets, fleet actions are managed separately for each location.

    Stopping fleet actions has several potential purposes. It allows you to temporarily stop auto-scaling activity but retain your scaling policies for use in the future. For multi-location fleets, you can set up fleet-wide auto-scaling, and then opt out of it for certain locations.

    This operation can be used in the following ways:

    • To stop actions on instances in the fleet's home Region, provide a fleet ID and the type of actions to suspend.

    • To stop actions on instances in one of the fleet's remote locations, provide a fleet ID, a location name, and the type of actions to suspend.

    If successful, Amazon GameLift no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity. To restart fleet actions again, call StartFleetActions.

    Learn more

    Setting up Amazon GameLift Fleets

    " + "documentation":"

    Suspends certain types of activity in a fleet location. Currently, this operation is used to stop auto-scaling activity. For multi-location fleets, fleet actions are managed separately for each location.

    Stopping fleet actions has several potential purposes. It allows you to temporarily stop auto-scaling activity but retain your scaling policies for use in the future. For multi-location fleets, you can set up fleet-wide auto-scaling, and then opt out of it for certain locations.

    This operation can be used in the following ways:

    • To stop actions on instances in the fleet's home Region, provide a fleet ID and the type of actions to suspend.

    • To stop actions on instances in one of the fleet's remote locations, provide a fleet ID, a location name, and the type of actions to suspend.

    If successful, Amazon GameLift Servers no longer initiates scaling events except in response to manual changes using UpdateFleetCapacity. To restart fleet actions again, call StartFleetActions.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", @@ -1685,7 +1685,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:

    • Instance type replacement - This activity evaluates the current game hosting viability of all Spot instance types that are defined for the game server group. It updates the Auto Scaling group to remove nonviable Spot Instance types, which have a higher chance of game server interruptions. It then balances capacity across the remaining viable Spot Instance types. When this activity is suspended, the Auto Scaling group continues with its current balance, regardless of viability. Instance protection, utilization metrics, and capacity scaling activities continue to be active.

    To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Temporarily stops activity on a game server group without terminating instances or the game server group. You can restart activity by calling ResumeGameServerGroup. You can suspend the following activity:

    • Instance type replacement - This activity evaluates the current game hosting viability of all Spot instance types that are defined for the game server group. It updates the Auto Scaling group to remove nonviable Spot Instance types, which have a higher chance of game server interruptions. It then balances capacity across the remaining viable Spot Instance types. When this activity is suspended, the Auto Scaling group continues with its current balance, regardless of viability. Instance protection, utilization metrics, and capacity scaling activities continue to be active.

    To suspend activity, specify a game server group ARN and the type of activity to be suspended. If successful, a GameServerGroup object is returned showing that the activity is listed in SuspendedActions.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "TagResource":{ "name":"TagResource", @@ -1702,7 +1702,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Assigns a tag to an Amazon GameLift resource. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize Amazon Web Services cost breakdowns, and more. This operation handles the permissions necessary to manage tags for Amazon GameLift resources that support tagging.

    To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " + "documentation":"

    Assigns a tag to an Amazon GameLift Servers resource. You can use tags to organize resources, create IAM permissions policies to manage access to groups of resources, customize Amazon Web Services cost breakdowns, and more. This operation handles the permissions necessary to manage tags for Amazon GameLift Servers resources that support tagging.

    To add a tag to a resource, specify the unique ARN value for the resource and provide a tag list containing one or more tags. The operation succeeds even if the list includes tags that are already assigned to the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " }, "TerminateGameSession":{ "name":"TerminateGameSession", @@ -1720,7 +1720,7 @@ {"shape":"InvalidGameSessionStatusException"}, {"shape":"NotReadyException"} ], - "documentation":"

    Ends a game session that's currently in progress. Use this action to terminate any game session that isn't in ERROR status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift server SDK 5.x or greater.

    Request options

    Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session:

    • Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate(). The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process.

    • Force an immediate game session termination. With this mode, the Amazon GameLift service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence.

    Results

    If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED with a status reason that indicates the termination method used.

    Learn more

    Add Amazon GameLift to your game server

    Amazon GameLift server SDK 5 reference guide for OnProcessTerminate() (C++) (C#) (Unreal) (Go)

    " + "documentation":"

    Ends a game session that's currently in progress. Use this action to terminate any game session that isn't in ERROR status. Terminating a game session is the most efficient way to free up a server process when it's hosting a game session that's in a bad state or not ending properly. You can use this action to terminate a game session that's being hosted on any type of Amazon GameLift Servers fleet compute, including computes for managed EC2, managed container, and Anywhere fleets. The game server must be integrated with Amazon GameLift Servers server SDK 5.x or greater.

    Request options

    Request termination for a single game session. Provide the game session ID and the termination mode. There are two potential methods for terminating a game session:

    • Initiate a graceful termination using the normal game session shutdown sequence. With this mode, the Amazon GameLift Servers service prompts the server process that's hosting the game session by calling the server SDK callback method OnProcessTerminate(). The callback implementation is part of the custom game server code. It might involve a variety of actions to gracefully end a game session, such as notifying players, before stopping the server process.

    • Force an immediate game session termination. With this mode, the Amazon GameLift Servers service takes action to stop the server process, which ends the game session without the normal game session shutdown sequence.

    Results

    If successful, game session termination is initiated. During this activity, the game session status is changed to TERMINATING. When completed, the server process that was hosting the game session has been stopped and replaced with a new server process that's ready to host a new game session. The old game session's status is changed to TERMINATED with a status reason that indicates the termination method used.

    Learn more

    Add Amazon GameLift Servers to your game server

    Amazon GameLift Servers server SDK 5 reference guide for OnProcessTerminate() (C++) (C#) (Unreal) (Go)

    " }, "UntagResource":{ "name":"UntagResource", @@ -1737,7 +1737,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Removes a tag assigned to a Amazon GameLift resource. You can use resource tags to organize Amazon Web Services resources for a range of purposes. This operation handles the permissions necessary to manage tags for Amazon GameLift resources that support tagging.

    To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to remove. This operation succeeds even if the list includes tags that aren't assigned to the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " + "documentation":"

    Removes a tag assigned to an Amazon GameLift Servers resource. You can use resource tags to organize Amazon Web Services resources for a range of purposes. This operation handles the permissions necessary to manage tags for Amazon GameLift Servers resources that support tagging.

    To remove a tag from a resource, specify the unique ARN value for the resource and provide a string list containing one or more tags to remove. This operation succeeds even if the list includes tags that aren't assigned to the resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " }, "UpdateAlias":{ "name":"UpdateAlias", @@ -1825,7 +1825,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Updates a fleet's mutable attributes, such as game session protection and resource creation limits.

    To update fleet attributes, specify the fleet ID and the property values that you want to change. If successful, Amazon GameLift returns the identifiers for the updated fleet.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Updates a fleet's mutable attributes, such as game session protection and resource creation limits.

    To update fleet attributes, specify the fleet ID and the property values that you want to change. If successful, Amazon GameLift Servers returns the identifiers for the updated fleet.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "UpdateFleetCapacity":{ "name":"UpdateFleetCapacity", @@ -1845,7 +1845,7 @@ {"shape":"UnauthorizedException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"

    Updates capacity settings for a managed EC2 fleet or managed container fleet. For these fleets, you adjust capacity by changing the number of instances in the fleet. Fleet capacity determines the number of game sessions and players that the fleet can host based on its configuration. For fleets with multiple locations, use this operation to manage capacity settings in each location individually.

    Use this operation to set these fleet capacity properties:

    • Minimum/maximum size: Set hard limits on the number of Amazon EC2 instances allowed. If Amazon GameLift receives a request--either through manual update or automatic scaling--it won't change the capacity to a value outside of this range.

    • Desired capacity: As an alternative to automatic scaling, manually set the number of Amazon EC2 instances to be maintained. Before changing a fleet's desired capacity, check the maximum capacity of the fleet's Amazon EC2 instance type by calling DescribeEC2InstanceLimits.

    To update capacity for a fleet's home Region, or if the fleet has no remote locations, omit the Location parameter. The fleet must be in ACTIVE status.

    To update capacity for a fleet's remote location, set the Location parameter to the location to update. The location must be in ACTIVE status.

    If successful, Amazon GameLift updates the capacity settings and returns the identifiers for the updated fleet and/or location. If a requested change to desired capacity exceeds the instance type's limit, the LimitExceeded exception occurs.

    Updates often prompt an immediate change in fleet capacity, such as when current capacity is different than the new desired capacity or outside the new limits. In this scenario, Amazon GameLift automatically initiates steps to add or remove instances in the fleet location. You can track a fleet's current capacity by calling DescribeFleetCapacity or DescribeFleetLocationCapacity.

    Learn more

    Scaling fleet capacity

    " + "documentation":"

    Updates capacity settings for a managed EC2 fleet or managed container fleet. For these fleets, you adjust capacity by changing the number of instances in the fleet. Fleet capacity determines the number of game sessions and players that the fleet can host based on its configuration. For fleets with multiple locations, use this operation to manage capacity settings in each location individually.

    Use this operation to set these fleet capacity properties:

    • Minimum/maximum size: Set hard limits on the number of Amazon EC2 instances allowed. If Amazon GameLift Servers receives a request--either through manual update or automatic scaling--it won't change the capacity to a value outside of this range.

    • Desired capacity: As an alternative to automatic scaling, manually set the number of Amazon EC2 instances to be maintained. Before changing a fleet's desired capacity, check the maximum capacity of the fleet's Amazon EC2 instance type by calling DescribeEC2InstanceLimits.

    To update capacity for a fleet's home Region, or if the fleet has no remote locations, omit the Location parameter. The fleet must be in ACTIVE status.

    To update capacity for a fleet's remote location, set the Location parameter to the location to update. The location must be in ACTIVE status.

    If successful, Amazon GameLift Servers updates the capacity settings and returns the identifiers for the updated fleet and/or location. If a requested change to desired capacity exceeds the instance type's limit, the LimitExceeded exception occurs.

    Updates often prompt an immediate change in fleet capacity, such as when current capacity is different than the new desired capacity or outside the new limits. In this scenario, Amazon GameLift Servers automatically initiates steps to add or remove instances in the fleet location. You can track a fleet's current capacity by calling DescribeFleetCapacity or DescribeFleetLocationCapacity.

    Learn more

    Scaling fleet capacity

    " }, "UpdateFleetPortSettings":{ "name":"UpdateFleetPortSettings", @@ -1864,7 +1864,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

    Updates permissions that allow inbound traffic to connect to game sessions in the fleet.

    To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions.

    If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Updates permissions that allow inbound traffic to connect to game sessions in the fleet.

    To update settings, specify the fleet ID to be updated and specify the changes to be made. List the permissions you want to add in InboundPermissionAuthorizations, and permissions you want to remove in InboundPermissionRevocations. Permissions to be removed must match existing fleet permissions.

    If successful, the fleet ID for the updated fleet is returned. For fleets with remote locations, port setting updates can take time to propagate across all locations. You can check the status of updates in each location by calling DescribeFleetPortSettings with a location name.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "UpdateGameServer":{ "name":"UpdateGameServer", @@ -1880,7 +1880,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift FleetIQ track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status from AVAILABLE (when the game server is available to be claimed) to UTILIZED (when the game server is currently hosting games). Identify the game server and game server group and specify the new utilization status. You can't change the status from to UTILIZED to AVAILABLE .

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Updates information about a registered game server to help Amazon GameLift Servers FleetIQ track game server availability. This operation is called by a game server process that is running on an instance in a game server group.

    Use this operation to update the following types of game server information. You can make all three types of updates in the same request:

    • To update the game server's utilization status from AVAILABLE (when the game server is available to be claimed) to UTILIZED (when the game server is currently hosting games). Identify the game server and game server group and specify the new utilization status. You can't change the status from UTILIZED to AVAILABLE.

    • To report health status, identify the game server and game server group and set health check to HEALTHY. If a game server does not report health status for a certain length of time, the game server is no longer considered healthy. As a result, it will be eventually deregistered from the game server group to avoid affecting utilization metrics. The best practice is to report health every 60 seconds.

    • To change game server metadata, provide updated game server data.

    Once a game server is successfully updated, the relevant statuses and timestamps are updated.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "UpdateGameServerGroup":{ "name":"UpdateGameServerGroup", @@ -1896,7 +1896,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    This operation is used with the Amazon GameLift FleetIQ solution and game server groups.

    Updates Amazon GameLift FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.

    To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that Amazon GameLift FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.

    Learn more

    Amazon GameLift FleetIQ Guide

    " + "documentation":"

    This operation is used with the Amazon GameLift Servers FleetIQ solution and game server groups.

    Updates Amazon GameLift Servers FleetIQ-specific properties for a game server group. Many Auto Scaling group properties are updated on the Auto Scaling group directly, including the launch template, Auto Scaling policies, and maximum/minimum/desired instance counts.

    To update the game server group, specify the game server group ID and provide the updated values. Before applying the updates, the new values are validated to ensure that Amazon GameLift Servers FleetIQ can continue to perform instance balancing activity. If successful, a GameServerGroup object is returned.

    Learn more

    Amazon GameLift Servers FleetIQ Guide

    " }, "UpdateGameSession":{ "name":"UpdateGameSession", @@ -1964,7 +1964,7 @@ {"shape":"InvalidFleetStatusException"}, {"shape":"LimitExceededException"} ], - "documentation":"

    Updates the runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift how to launch server processes on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in ACTIVE status.

    To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration with an updated set of server process configurations.

    If successful, the fleet's runtime configuration settings are updated. Fleet computes that run game server processes regularly check for and receive updated runtime configurations. The computes immediately take action to comply with the new configuration by launching new server processes or by not replacing existing processes when they shut down. Updating a fleet's runtime configuration never affects existing server processes.

    Learn more

    Setting up Amazon GameLift fleets

    " + "documentation":"

    Updates the runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift Servers how to launch server processes on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in ACTIVE status.

    To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration with an updated set of server process configurations.

    If successful, the fleet's runtime configuration settings are updated. Fleet computes that run game server processes regularly check for and receive updated runtime configurations. The computes immediately take action to comply with the new configuration by launching new server processes or by not replacing existing processes when they shut down. Updating a fleet's runtime configuration never affects existing server processes.

    Learn more

    Setting up Amazon GameLift Servers fleets

    " }, "UpdateScript":{ "name":"UpdateScript", @@ -1980,7 +1980,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

    Updates Realtime script metadata and content.

    To update script metadata, specify the script ID and provide updated name and/or version values.

    To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.

    If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.

    Learn more

    Amazon GameLift Amazon GameLift Realtime

    Related actions

    All APIs by task

    " + "documentation":"

    Updates Realtime script metadata and content.

    To update script metadata, specify the script ID and provide updated name and/or version values.

    To update script content, provide an updated zip file by pointing to either a local file or an Amazon S3 bucket location. You can use either method regardless of how the original script was uploaded. Use the Version parameter to track updates to the script.

    If the call is successful, the updated metadata is stored in the script record and a revised script is uploaded to the Amazon GameLift Servers service. Once the script is updated and acquired by a fleet instance, the new version is used for all new game sessions.

    Learn more

    Amazon GameLift Servers Amazon GameLift Servers Realtime

    Related actions

    All APIs by task

    " }, "ValidateMatchmakingRuleSet":{ "name":"ValidateMatchmakingRuleSet", @@ -2023,8 +2023,7 @@ }, "AcceptMatchOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "AcceptanceType":{ "type":"string", @@ -2046,7 +2045,7 @@ }, "AliasArn":{ "shape":"AliasArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift alias resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::alias/alias-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift alias ARN, the resource ID matches the alias ID value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers alias resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::alias/alias-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift alias ARN, the resource ID matches the alias ID value.

    " }, "Description":{ "shape":"FreeText", @@ -2094,10 +2093,10 @@ "members":{ "Cost":{ "shape":"NonNegativeLimitedLengthDouble", - "documentation":"

    The cost to run your fleet per hour. Amazon GameLift uses the provided cost of your fleet to balance usage in queues. For more information about queues, see Setting up queues in the Amazon GameLift Developer Guide.

    " + "documentation":"

    The cost to run your fleet per hour. Amazon GameLift Servers uses the provided cost of your fleet to balance usage in queues. For more information about queues, see Setting up queues in the Amazon GameLift Servers Developer Guide.

    " } }, - "documentation":"

    Amazon GameLift configuration options for your Anywhere fleets.

    " + "documentation":"

    Amazon GameLift Servers configuration options for your Anywhere fleets.

    " }, "ArnStringModel":{ "type":"string", @@ -2149,7 +2148,7 @@ "documentation":"

    The token that users must pass to the service API to use the temporary credentials.

    " } }, - "documentation":"

    Amazon Web Services account security credentials that allow interactions with Amazon GameLift resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.

    Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.

    You need Amazon Web Services credentials for the following tasks:

    ", + "documentation":"

    Amazon Web Services account security credentials that allow interactions with Amazon GameLift Servers resources. The credentials are temporary and valid for a limited time span. You can request fresh credentials at any time.

    Amazon Web Services security credentials consist of three parts: an access key ID, a secret access key, and a session token. You must use all three parts together to authenticate your access requests.

    You need Amazon Web Services credentials for the following tasks:

    ", "sensitive":true }, "BackfillMode":{ @@ -2177,7 +2176,7 @@ }, "BuildArn":{ "shape":"BuildArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift build resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::build/build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift build ARN, the resource ID matches the BuildId value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers build resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::build/build-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift build ARN, the resource ID matches the BuildId value.

    " }, "Name":{ "shape":"FreeText", @@ -2197,7 +2196,7 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    Operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    Operating system that the game server binaries are built to run on. This value determines the type of fleet resources that you can use for this build.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "CreationTime":{ "shape":"Timestamp", @@ -2205,7 +2204,7 @@ }, "ServerSdkVersion":{ "shape":"ServerSdkVersion", - "documentation":"

    The Amazon GameLift Server SDK version used to develop your game server.

    " + "documentation":"

    The Amazon GameLift Servers Server SDK version used to develop your game server.

    " } }, "documentation":"

    Properties describing a custom game build.

    All APIs by task

    " @@ -2243,7 +2242,7 @@ "documentation":"

    Indicates whether a TLS/SSL certificate is generated for a fleet.

    Valid values include:

    • GENERATED - Generate a TLS/SSL certificate for this fleet.

    • DISABLED - (default) Do not generate a TLS/SSL certificate for this fleet.

    " } }, - "documentation":"

    Determines whether a TLS/SSL certificate is generated for a fleet. This feature must be enabled when creating the fleet. All instances in a fleet share the same certificate. The certificate can be retrieved by calling the Amazon GameLift Server SDK operation GetInstanceCertificate.

    " + "documentation":"

    Determines whether a TLS/SSL certificate is generated for a fleet. This feature must be enabled when creating the fleet. All instances in a fleet share the same certificate. The certificate can be retrieved by calling the Amazon GameLift Servers Server SDK operation GetInstanceCertificate.

    " }, "CertificateType":{ "type":"string", @@ -2260,7 +2259,7 @@ "documentation":"

    List of instance statuses that game servers may be claimed on. If provided, the list must contain the ACTIVE status.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Filters which game servers may be claimed when calling ClaimGameServer.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Filters which game servers may be claimed when calling ClaimGameServer.

    " }, "ClaimGameServerInput":{ "type":"structure", @@ -2268,11 +2267,11 @@ "members":{ "GameServerGroupName":{ "shape":"GameServerGroupNameOrArn", - "documentation":"

    A unique identifier for the game server group where the game server is running. If you are not specifying a game server to claim, this value identifies where you want Amazon GameLift FleetIQ to look for an available game server to claim.

    " + "documentation":"

    A unique identifier for the game server group where the game server is running. If you are not specifying a game server to claim, this value identifies where you want Amazon GameLift Servers FleetIQ to look for an available game server to claim.

    " }, "GameServerId":{ "shape":"GameServerId", - "documentation":"

    A custom string that uniquely identifies the game server to claim. If this parameter is left empty, Amazon GameLift FleetIQ searches for an available game server in the specified game server group.

    " + "documentation":"

    A custom string that uniquely identifies the game server to claim. If this parameter is left empty, Amazon GameLift Servers FleetIQ searches for an available game server in the specified game server group.

    " }, "GameServerData":{ "shape":"GameServerData", @@ -2323,11 +2322,11 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " + "documentation":"

    The IP address of a compute resource. Amazon GameLift Servers requires a DNS name or IP address for a compute.

    " }, "DnsName":{ "shape":"DnsName", - "documentation":"

    The DNS name of a compute resource. Amazon GameLift requires a DNS name or IP address for a compute.

    " + "documentation":"

    The DNS name of a compute resource. Amazon GameLift Servers requires a DNS name or IP address for a compute.

    " }, "ComputeStatus":{ "shape":"ComputeStatus", @@ -2343,19 +2342,19 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    The type of operating system on the compute resource.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The type of operating system on the compute resource.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "Type":{ "shape":"EC2InstanceType", - "documentation":"

    The Amazon EC2 instance type that the fleet uses. For registered computes in an Amazon GameLift Anywhere fleet, this property is empty.

    " + "documentation":"

    The Amazon EC2 instance type that the fleet uses. For registered computes in an Amazon GameLift Servers Anywhere fleet, this property is empty.

    " }, "GameLiftServiceSdkEndpoint":{ "shape":"GameLiftServiceSdkEndpointOutput", - "documentation":"

    The Amazon GameLift SDK endpoint connection for a registered compute resource in an Anywhere fleet. The game servers on the compute use this endpoint to connect to the Amazon GameLift service.

    " + "documentation":"

    The Amazon GameLift Servers SDK endpoint connection for a registered compute resource in an Anywhere fleet. The game servers on the compute use this endpoint to connect to the Amazon GameLift Servers service.

    " }, "GameLiftAgentEndpoint":{ "shape":"GameLiftAgentEndpointOutput", - "documentation":"

    The endpoint of the Amazon GameLift Agent.

    " + "documentation":"

    The endpoint of the Amazon GameLift Servers Agent.

    " }, "InstanceId":{ "shape":"InstanceId", @@ -2370,7 +2369,7 @@ "documentation":"

    The game server container group definition for the compute.

    " } }, - "documentation":"

    An Amazon GameLift compute resource for hosting your game servers. Computes in an Amazon GameLift fleet differs depending on the fleet's compute type property as follows:

    • For managed EC2 fleets, a compute is an EC2 instance.

    • For Anywhere fleets, a compute is a computing resource that you provide and is registered to the fleet.

    " + "documentation":"

    An Amazon GameLift Servers compute resource for hosting your game servers. Computes in an Amazon GameLift Servers fleet differ depending on the fleet's compute type property as follows:

    • For managed EC2 fleets, a compute is an EC2 instance.

    • For Anywhere fleets, a compute is a computing resource that you provide and is registered to the fleet.

    " }, "ComputeArn":{ "type":"string", @@ -2532,11 +2531,11 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift fleet ARN, the resource ID matches the FleetId value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift fleet ARN, the resource ID matches the FleetId value.

    " }, "FleetRoleArn":{ "shape":"IamRoleArn", - "documentation":"

    The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. See Set up an IAM service role. This fleet property can't be changed.

    " + "documentation":"

    The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift Servers. See Set up an IAM service role. This fleet property can't be changed.

    " }, "GameServerContainerGroupDefinitionName":{ "shape":"ContainerGroupDefinitionName", @@ -2589,7 +2588,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

    Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession.

    • NoProtection -- Game sessions can be shut down during active gameplay.

    • FullProtection -- Game sessions in ACTIVE status can't be shut down.

    " + "documentation":"

    Determines whether Amazon GameLift Servers can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift Servers might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession.

    • NoProtection -- Game sessions can be shut down during active gameplay.

    • FullProtection -- Game sessions in ACTIVE status can't be shut down.

    " }, "GameSessionCreationLimitPolicy":{ "shape":"GameSessionCreationLimitPolicy", @@ -2605,14 +2604,14 @@ }, "LogConfiguration":{ "shape":"LogConfiguration", - "documentation":"

    The method that is used to collect container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs.

    • CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.

    • S3 -- Store logs in an Amazon S3 bucket that you define.

    • NONE -- Don't collect container logs.

    " + "documentation":"

    The method that is used to collect container logs for the fleet. Amazon GameLift Servers saves all standard output for each container in logs, including game session logs.

    • CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.

    • S3 -- Store logs in an Amazon S3 bucket that you define.

    • NONE -- Don't collect container logs.

    " }, "LocationAttributes":{ "shape":"ContainerFleetLocationAttributesList", "documentation":"

    Information about the container fleet's remote locations where fleet instances are deployed.

    " } }, - "documentation":"

    Describes an Amazon GameLift managed container fleet.

    " + "documentation":"

    Describes an Amazon GameLift Servers managed container fleet.

    " }, "ContainerFleetBillingType":{ "type":"string", @@ -2684,7 +2683,7 @@ "members":{ "ContainerGroupDefinitionArn":{ "shape":"ContainerGroupDefinitionArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift ContainerGroupDefinition resource. It uniquely identifies the resource across all Amazon Web Services Regions. Format is arn:aws:gamelift:[region]::containergroupdefinition/[container group definition name]:[version].

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers ContainerGroupDefinition resource. It uniquely identifies the resource across all Amazon Web Services Regions. Format is arn:aws:gamelift:[region]::containergroupdefinition/[container group definition name]:[version].

    " }, "CreationTime":{ "shape":"Timestamp", @@ -2692,7 +2691,7 @@ }, "OperatingSystem":{ "shape":"ContainerOperatingSystem", - "documentation":"

    The platform that all containers in the container group definition run on.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The platform that all containers in the container group definition run on.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "Name":{ "shape":"ContainerGroupDefinitionName", @@ -2700,7 +2699,7 @@ }, "ContainerGroupType":{ "shape":"ContainerGroupType", - "documentation":"

    The type of container group. Container group type determines how Amazon GameLift deploys the container group on each fleet instance.

    " + "documentation":"

    The type of container group. Container group type determines how Amazon GameLift Servers deploys the container group on each fleet instance.

    " }, "TotalMemoryLimitMebibytes":{ "shape":"ContainerTotalMemoryLimit", @@ -2728,11 +2727,11 @@ }, "Status":{ "shape":"ContainerGroupDefinitionStatus", - "documentation":"

    Current status of the container group definition resource. Values include:

    • COPYING -- Amazon GameLift is in the process of making copies of all container images that are defined in the group. While in this state, the resource can't be used to create a container fleet.

    • READY -- Amazon GameLift has copied the registry images for all containers that are defined in the group. You can use a container group definition in this status to create a container fleet.

    • FAILED -- Amazon GameLift failed to create a valid container group definition resource. For more details on the cause of the failure, see StatusReason. A container group definition resource in failed status will be deleted within a few minutes.

    " + "documentation":"

    Current status of the container group definition resource. Values include:

    • COPYING -- Amazon GameLift Servers is in the process of making copies of all container images that are defined in the group. While in this state, the resource can't be used to create a container fleet.

    • READY -- Amazon GameLift Servers has copied the registry images for all containers that are defined in the group. You can use a container group definition in this status to create a container fleet.

    • FAILED -- Amazon GameLift Servers failed to create a valid container group definition resource. For more details on the cause of the failure, see StatusReason. A container group definition resource in failed status will be deleted within a few minutes.

    " }, "StatusReason":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Additional information about a container group definition that's in FAILED status. Possible reasons include:

    • An internal issue prevented Amazon GameLift from creating the container group definition resource. Delete the failed resource and call CreateContainerGroupDefinitionagain.

    • An access-denied message means that you don't have permissions to access the container image on ECR. See IAM permission examples for help setting up required IAM permissions for Amazon GameLift.

    • The ImageUri value for at least one of the containers in the container group definition was invalid or not found in the current Amazon Web Services account.

    • At least one of the container images referenced in the container group definition exceeds the allowed size. For size limits, see Amazon GameLift endpoints and quotas.

    • At least one of the container images referenced in the container group definition uses a different operating system than the one defined for the container group.

    " + "documentation":"

    Additional information about a container group definition that's in FAILED status. Possible reasons include:

    • An internal issue prevented Amazon GameLift Servers from creating the container group definition resource. Delete the failed resource and call CreateContainerGroupDefinition again.

    • An access-denied message means that you don't have permissions to access the container image on ECR. See IAM permission examples for help setting up required IAM permissions for Amazon GameLift Servers.

    • The ImageUri value for at least one of the containers in the container group definition was invalid or not found in the current Amazon Web Services account.

    • At least one of the container images referenced in the container group definition exceeds the allowed size. For size limits, see Amazon GameLift Servers endpoints and quotas.

    • At least one of the container images referenced in the container group definition uses a different operating system than the one defined for the container group.

    " } }, "documentation":"

    The properties that describe a container group resource. You can update all properties of a container group definition properties. Updates to a container group definition are saved as new versions.

    Used with: CreateContainerGroupDefinition

    Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition

    " @@ -2799,7 +2798,7 @@ "documentation":"

    The time period (in seconds) to wait for a health check to succeed before counting a failed health check.

    " } }, - "documentation":"

    Instructions on when and how to check the health of a support container in a container fleet. These properties override any Docker health checks that are set in the container image. For more information on container health checks, see HealthCheck command in the Amazon Elastic Container Service API. Game server containers don't have a health check parameter; Amazon GameLift automatically handles health checks for these containers.

    The following example instructs the container to initiate a health check command every 60 seconds and wait 10 seconds for it to succeed. If it fails, retry the command 3 times before flagging the container as unhealthy. It also tells the container to wait 100 seconds after launch before counting failed health checks.

    {\"Command\": [ \"CMD-SHELL\", \"ps cax | grep \"processmanager\" || exit 1\" ], \"Interval\": 60, \"Timeout\": 10, \"Retries\": 3, \"StartPeriod\": 100 }

    Part of: SupportContainerDefinition, SupportContainerDefinitionInput

    " + "documentation":"

    Instructions on when and how to check the health of a support container in a container fleet. These properties override any Docker health checks that are set in the container image. For more information on container health checks, see HealthCheck command in the Amazon Elastic Container Service API. Game server containers don't have a health check parameter; Amazon GameLift Servers automatically handles health checks for these containers.

    The following example instructs the container to initiate a health check command every 60 seconds and wait 10 seconds for it to succeed. If it fails, retry the command 3 times before flagging the container as unhealthy. It also tells the container to wait 100 seconds after launch before counting failed health checks.

    {\"Command\": [ \"CMD-SHELL\", \"ps cax | grep \"processmanager\" || exit 1\" ], \"Interval\": 60, \"Timeout\": 10, \"Retries\": 3, \"StartPeriod\": 100 }

    Part of: SupportContainerDefinition, SupportContainerDefinitionInput

    " }, "ContainerHealthCheckInterval":{ "type":"integer", @@ -2894,10 +2893,10 @@ "members":{ "ContainerPortRanges":{ "shape":"ContainerPortRangeList", - "documentation":"

    A set of one or more container port number ranges. The ranges can't overlap.

    " + "documentation":"

    A set of one or more container port number ranges. The ranges can't overlap if the ranges' network protocols are the same. Overlapping ranges with different protocols is allowed but not recommended.

    " } }, - "documentation":"

    A set of port ranges that can be opened on the container. A process that's running in the container can bind to a port number, making it accessible to inbound traffic. Container ports map to a container fleet's connection ports.

    Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput

    " + "documentation":"

    A set of port ranges that can be opened on the container. A process that's running in the container can bind to a port number, making it accessible to inbound traffic when it's mapped to a container fleet's connection port.

    Each container port range specifies a network protocol. When the configuration supports more than one protocol, we recommend that you use a different range for each protocol. If your ranges have overlapping port numbers, Amazon GameLift Servers maps a duplicated container port number to different connection ports. For example, if you include 1935 in port ranges for both TCP and UDP, it might result in the following mappings:

    • container port 1935 (tcp) => connection port 2001

    • container port 1935 (udp) => connection port 2002

    Part of: GameServerContainerDefinition, GameServerContainerDefinitionInput, SupportContainerDefinition, SupportContainerDefinitionInput

    " }, "ContainerPortRange":{ "type":"structure", @@ -2920,7 +2919,7 @@ "documentation":"

    The network protocol that these ports support.

    " } }, - "documentation":"

    A set of one or more port numbers that can be opened on the container.

    Part of: ContainerPortConfiguration

    " + "documentation":"

    A set of one or more port numbers that can be opened on the container, and the supported network protocol.

    Part of: ContainerPortConfiguration

    " }, "ContainerPortRangeList":{ "type":"list", @@ -2990,11 +2989,11 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

    Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region.

    If a StorageLocation is specified, the size of your file can be found in your Amazon S3 bucket. Amazon GameLift will report a SizeOnDisk of 0.

    " + "documentation":"

    Information indicating where your game build files are stored. Use this parameter only when creating a build with files stored in an Amazon S3 bucket that you own. The storage location must specify an Amazon S3 bucket name and key. The location must also specify a role ARN that you set up to allow Amazon GameLift Servers to access your Amazon S3 bucket. The S3 bucket and your new build must be in the same Region.

    If a StorageLocation is specified, the size of your file can be found in your Amazon S3 bucket. Amazon GameLift Servers will report a SizeOnDisk of 0.

    " }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "Tags":{ "shape":"TagList", @@ -3002,7 +3001,7 @@ }, "ServerSdkVersion":{ "shape":"ServerSdkVersion", - "documentation":"

    A server SDK version you used when integrating your game server build with Amazon GameLift. For more information see Integrate games with custom game servers. By default Amazon GameLift sets this value to 4.0.2.

    " + "documentation":"

    A server SDK version you used when integrating your game server build with Amazon GameLift Servers. For more information see Integrate games with custom game servers. By default Amazon GameLift Servers sets this value to 4.0.2.

    " } } }, @@ -3015,7 +3014,7 @@ }, "UploadCredentials":{ "shape":"AwsCredentials", - "documentation":"

    This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an Amazon S3 bucket that is owned by Amazon GameLift. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

    " + "documentation":"

    This element is returned only when the operation is called without a storage location. It contains credentials to use when you are uploading a build file to an Amazon S3 bucket that is owned by Amazon GameLift Servers. Credentials have a limited life span. To refresh these credentials, call RequestUploadCredentials.

    " }, "StorageLocation":{ "shape":"S3Location", @@ -3029,7 +3028,7 @@ "members":{ "FleetRoleArn":{ "shape":"IamRoleArn", - "documentation":"

    The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created.

    IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name].

    " + "documentation":"

    The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift Servers. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created.

    IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name].

    " }, "Description":{ "shape":"NonZeroAndMaxString", @@ -3045,19 +3044,19 @@ }, "InstanceConnectionPortRange":{ "shape":"ConnectionPortRange", - "documentation":"

    The set of port numbers to open on each fleet instance. A fleet's connection ports map to container ports that are configured in the fleet's container group definitions.

    By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are:

    • Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition]

    You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's inbound permissions port range.

    If you set values manually, Amazon GameLift no longer calculates a port range for you, even if you later remove the manual settings.

    " + "documentation":"

    The set of port numbers to open on each fleet instance. A fleet's connection ports map to container ports that are configured in the fleet's container group definitions.

    By default, Amazon GameLift Servers calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are:

    • Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift Servers uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition]

    You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's inbound permissions port range.

    If you set values manually, Amazon GameLift Servers no longer calculates a port range for you, even if you later remove the manual settings.

    " }, "InstanceInboundPermissions":{ "shape":"IpPermissionsList", - "documentation":"

    The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. As a best practice, when remotely accessing a fleet instance, we recommend opening ports only when you need them and closing them when you're finished.

    By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are:

    • Protocol: UDP

    • Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition]

    You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's connection port range.

    If you set values manually, Amazon GameLift no longer calculates a port range for you, even if you later remove the manual settings.

    " + "documentation":"

    The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. As a best practice, when remotely accessing a fleet instance, we recommend opening ports only when you need them and closing them when you're finished.

    By default, Amazon GameLift Servers calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are:

    • Protocol: UDP

    • Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift Servers uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition]

    You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's connection port range.

    If you set values manually, Amazon GameLift Servers no longer calculates a port range for you, even if you later remove the manual settings.

    " }, "GameServerContainerGroupsPerInstance":{ "shape":"GameServerContainerGroupsPerInstance", - "documentation":"

    The number of times to replicate the game server container group on each fleet instance.

    By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. This calculation is based on the CPU and memory resources of the fleet's instance type). To use the calculated maximum, don't set this parameter. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum.

    " + "documentation":"

    The number of times to replicate the game server container group on each fleet instance.

    By default, Amazon GameLift Servers calculates the maximum number of game server container groups that can fit on each instance. This calculation is based on the CPU and memory resources of the fleet's instance type. To use the calculated maximum, don't set this parameter. If you set this number manually, Amazon GameLift Servers uses your value as long as it's less than the calculated maximum.

    " }, "InstanceType":{ "shape":"NonZeroAndMaxString", - "documentation":"

    The Amazon EC2 instance type to use for all instances in the fleet. For multi-location fleets, the instance type must be available in the home region and all remote locations. Instance type determines the computing resources and processing power that's available to host your game servers. This includes including CPU, memory, storage, and networking capacity.

    By default, Amazon GameLift selects an instance type that fits the needs of your container groups and is available in all selected fleet locations. You can also choose to manually set this parameter. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types.

    You can't update this fleet property later.

    " + "documentation":"

    The Amazon EC2 instance type to use for all instances in the fleet. For multi-location fleets, the instance type must be available in the home region and all remote locations. Instance type determines the computing resources and processing power that's available to host your game servers. This includes CPU, memory, storage, and networking capacity.

    By default, Amazon GameLift Servers selects an instance type that fits the needs of your container groups and is available in all selected fleet locations. You can also choose to manually set this parameter. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types.

    You can't update this fleet property later.

    " }, "BillingType":{ "shape":"ContainerFleetBillingType", @@ -3065,7 +3064,7 @@ }, "Locations":{ "shape":"LocationConfigurationList", - "documentation":"

    A set of locations to deploy container fleet instances to. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. Also include the fleet's home Region, which is the Amazon Web Services Region where the fleet is created. For a list of supported Regions and Local Zones, see Amazon GameLift service locations for managed hosting.

    " + "documentation":"

    A set of locations to deploy container fleet instances to. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift Servers. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. Also include the fleet's home Region, which is the Amazon Web Services Region where the fleet is created. For a list of supported Regions and Local Zones, see Amazon GameLift Servers service locations for managed hosting.

    " }, "MetricGroups":{ "shape":"MetricGroupList", @@ -3073,7 +3072,7 @@ }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", - "documentation":"

    Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession.

    • NoProtection -- Game sessions can be shut down during active gameplay.

    • FullProtection -- Game sessions in ACTIVE status can't be shut down.

    By default, this property is set to NoProtection.

    " + "documentation":"

    Determines whether Amazon GameLift Servers can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift Servers might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession.

    • NoProtection -- Game sessions can be shut down during active gameplay.

    • FullProtection -- Game sessions in ACTIVE status can't be shut down.

    By default, this property is set to NoProtection.

    " }, "GameSessionCreationLimitPolicy":{ "shape":"GameSessionCreationLimitPolicy", @@ -3081,7 +3080,7 @@ }, "LogConfiguration":{ "shape":"LogConfiguration", - "documentation":"

    A method for collecting container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs. You can select from the following methods:

    • CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.

    • S3 -- Store logs in an Amazon S3 bucket that you define.

    • NONE -- Don't collect container logs.

    By default, this property is set to CLOUDWATCH.

    Amazon GameLift requires permissions to send logs other Amazon Web Services services in your account. These permissions are included in the IAM fleet role for this container fleet (see FleetRoleArn).

    " + "documentation":"

    A method for collecting container logs for the fleet. Amazon GameLift Servers saves all standard output for each container in logs, including game session logs. You can select from the following methods:

    • CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group.

    • S3 -- Store logs in an Amazon S3 bucket that you define.

    • NONE -- Don't collect container logs.

    By default, this property is set to CLOUDWATCH.

    Amazon GameLift Servers requires permissions to send logs to other Amazon Web Services services in your account. These permissions are included in the IAM fleet role for this container fleet (see FleetRoleArn).

    " }, "Tags":{ "shape":"TagList", @@ -3113,7 +3112,7 @@ }, "ContainerGroupType":{ "shape":"ContainerGroupType", - "documentation":"

    The type of container group being defined. Container group type determines how Amazon GameLift deploys the container group on each fleet instance.

    Default value: GAME_SERVER

    " + "documentation":"

    The type of container group being defined. Container group type determines how Amazon GameLift Servers deploys the container group on each fleet instance.

    Default value: GAME_SERVER

    " }, "TotalMemoryLimitMebibytes":{ "shape":"ContainerTotalMemoryLimit", @@ -3133,7 +3132,7 @@ }, "OperatingSystem":{ "shape":"ContainerOperatingSystem", - "documentation":"

    The platform that all containers in the group use. Containers in a group must run on the same operating system.

    Default value: AMAZON_LINUX_2023

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The platform that all containers in the group use. Containers in a group must run on the same operating system.

    Default value: AMAZON_LINUX_2023

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "VersionDescription":{ "shape":"NonZeroAndMaxString", @@ -3168,11 +3167,11 @@ }, "BuildId":{ "shape":"BuildIdOrArn", - "documentation":"

    The unique identifier for a custom game server build to be deployed to a fleet with compute type EC2. You can use either the build ID or ARN. The build must be uploaded to Amazon GameLift and in READY status. This fleet property can't be changed after the fleet is created.

    " + "documentation":"

    The unique identifier for a custom game server build to be deployed to a fleet with compute type EC2. You can use either the build ID or ARN. The build must be uploaded to Amazon GameLift Servers and in READY status. This fleet property can't be changed after the fleet is created.

    " }, "ScriptId":{ "shape":"ScriptIdOrArn", - "documentation":"

    The unique identifier for a Realtime configuration script to be deployed to a fleet with compute type EC2. You can use either the script ID or ARN. Scripts must be uploaded to Amazon GameLift prior to creating the fleet. This fleet property can't be changed after the fleet is created.

    " + "documentation":"

    The unique identifier for a Realtime configuration script to be deployed to a fleet with compute type EC2. You can use either the script ID or ARN. Scripts must be uploaded to Amazon GameLift Servers prior to creating the fleet. This fleet property can't be changed after the fleet is created.

    " }, "ServerLaunchPath":{ "shape":"LaunchPathStringModel", @@ -3184,15 +3183,15 @@ }, "LogPaths":{ "shape":"StringList", - "documentation":"

    This parameter is no longer used. To specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. For more information, see Initialize the server process in the Amazon GameLift Developer Guide.

    " + "documentation":"

    This parameter is no longer used. To specify where Amazon GameLift Servers should store log files once a server process shuts down, use the Amazon GameLift Servers server API ProcessReady() and specify one or more directory paths in logParameters. For more information, see Initialize the server process in the Amazon GameLift Servers Developer Guide.

    " }, "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

    The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types.

    " + "documentation":"

    The Amazon GameLift Servers-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types.

    " }, "EC2InboundPermissions":{ "shape":"IpPermissionsList", - "documentation":"

    The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Amazon GameLift Realtime fleets, Amazon GameLift automatically sets TCP and UDP ranges.

    " + "documentation":"

    The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Amazon GameLift Servers Realtime fleets, Amazon GameLift Servers automatically sets TCP and UDP ranges.

    " }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", @@ -3200,7 +3199,7 @@ }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

    Instructions for how to launch and run server processes on the fleet. Set runtime configuration for managed EC2 fleets. For an Anywhere fleets, set this parameter only if the fleet is running the Amazon GameLift Agent. The runtime configuration defines one or more server process configurations. Each server process identifies a game executable or Realtime script file and the number of processes to run concurrently.

    This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters, which are still supported for backward compatibility.

    " + "documentation":"

    Instructions for how to launch and run server processes on the fleet. Set runtime configuration for managed EC2 fleets. For Anywhere fleets, set this parameter only if the fleet is running the Amazon GameLift Servers Agent. The runtime configuration defines one or more server process configurations. Each server process identifies a game executable or Realtime script file and the number of processes to run concurrently.

    This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters, which are still supported for backward compatibility.

    " }, "ResourceCreationLimitPolicy":{ "shape":"ResourceCreationLimitPolicy", @@ -3212,11 +3211,11 @@ }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Used when peering your Amazon GameLift fleet with a VPC, the unique identifier for the Amazon Web Services account that owns the VPC. You can find your account ID in the Amazon Web Services Management Console under account settings.

    " + "documentation":"

    Used when peering your Amazon GameLift Servers fleet with a VPC, the unique identifier for the Amazon Web Services account that owns the VPC. You can find your account ID in the Amazon Web Services Management Console under account settings.

    " }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " }, "FleetType":{ "shape":"FleetType", @@ -3228,11 +3227,11 @@ }, "CertificateConfiguration":{ "shape":"CertificateConfiguration", - "documentation":"

    Prompts Amazon GameLift to generate a TLS/SSL certificate for the fleet. Amazon GameLift uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift. By default, the CertificateConfiguration is DISABLED. You can't change this property after you create the fleet.

    Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend you replace fleets before 13 months, consider using fleet aliases for a smooth transition.

    ACM isn't available in all Amazon Web Services regions. A fleet creation request with certificate generation enabled in an unsupported Region, fails with a 4xx error. For more information about the supported Regions, see Supported Regions in the Certificate Manager User Guide.

    " + "documentation":"

    Prompts Amazon GameLift Servers to generate a TLS/SSL certificate for the fleet. Amazon GameLift Servers uses the certificates to encrypt traffic between game clients and the game servers running on Amazon GameLift Servers. By default, the CertificateConfiguration is DISABLED. You can't change this property after you create the fleet.

    Certificate Manager (ACM) certificates expire after 13 months. Certificate expiration can cause fleets to fail, preventing players from connecting to instances in the fleet. We recommend you replace fleets before 13 months, consider using fleet aliases for a smooth transition.

    ACM isn't available in all Amazon Web Services regions. A fleet creation request with certificate generation enabled in an unsupported Region, fails with a 4xx error. For more information about the supported Regions, see Supported Regions in the Certificate Manager User Guide.

    " }, "Locations":{ "shape":"LocationConfigurationList", - "documentation":"

    A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in Amazon Web Services Regions that support multiple locations. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see Amazon GameLift service locations for managed hosting.

    " + "documentation":"

    A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in Amazon Web Services Regions that support multiple locations. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift Servers. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. When using this parameter, Amazon GameLift Servers requires you to include your home location in the request. For a list of supported Regions and Local Zones, see Amazon GameLift Servers service locations for managed hosting.

    " }, "Tags":{ "shape":"TagList", @@ -3244,11 +3243,11 @@ }, "AnywhereConfiguration":{ "shape":"AnywhereConfiguration", - "documentation":"

    Amazon GameLift Anywhere configuration options.

    " + "documentation":"

    Amazon GameLift Servers Anywhere configuration options.

    " }, "InstanceRoleCredentialsProvider":{ "shape":"InstanceRoleCredentialsProvider", - "documentation":"

    Prompts Amazon GameLift to generate a shared credentials file for the IAM role that's defined in InstanceRoleArn. The shared credentials file is stored on each fleet instance and refreshed as needed. Use shared credentials for applications that are deployed along with the game server executable, if the game server is integrated with server SDK version 5.x. For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets.

    " + "documentation":"

    Prompts Amazon GameLift Servers to generate a shared credentials file for the IAM role that's defined in InstanceRoleArn. The shared credentials file is stored on each fleet instance and refreshed as needed. Use shared credentials for applications that are deployed along with the game server executable, if the game server is integrated with server SDK version 5.x. For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets.

    " } } }, @@ -3265,7 +3264,7 @@ }, "Locations":{ "shape":"LocationConfigurationList", - "documentation":"

    A list of locations to deploy additional instances to and manage as part of the fleet. You can add any Amazon GameLift-supported Amazon Web Services Region as a remote location, in the form of an Amazon Web Services Region code such as us-west-2.

    " + "documentation":"

    A list of locations to deploy additional instances to and manage as part of the fleet. You can add any Amazon GameLift Servers-supported Amazon Web Services Region as a remote location, in the form of an Amazon Web Services Region code such as us-west-2.

    " } } }, @@ -3278,11 +3277,11 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "LocationStates":{ "shape":"LocationStateList", - "documentation":"

    The remote locations that are being added to the fleet, and the life-cycle status of each location. For new locations, the status is set to NEW. During location creation, Amazon GameLift updates each location's status as instances are deployed there and prepared for game hosting. This list does not include the fleet home Region or any remote locations that were already added to the fleet.

    " + "documentation":"

    The remote locations that are being added to the fleet, and the life-cycle status of each location. For new locations, the status is set to NEW. During location creation, Amazon GameLift Servers updates each location's status as instances are deployed there and prepared for game hosting. This list does not include the fleet home Region or any remote locations that were already added to the fleet.

    " } } }, @@ -3295,7 +3294,7 @@ }, "LocationStates":{ "shape":"LocationStateList", - "documentation":"

    The fleet's locations and life-cycle status of each location. For new fleets, the status of all locations is set to NEW. During fleet creation, Amazon GameLift updates each location status as instances are deployed there and prepared for game hosting. This list includes an entry for the fleet's home Region. For fleets with no remote locations, only one entry, representing the home Region, is returned.

    " + "documentation":"

    The fleet's locations and life-cycle status of each location. For new fleets, the status of all locations is set to NEW. During fleet creation, Amazon GameLift Servers updates each location status as instances are deployed there and prepared for game hosting. This list includes an entry for the fleet's home Region. For fleets with no remote locations, only one entry, representing the home Region, is returned.

    " } } }, @@ -3312,27 +3311,27 @@ "members":{ "GameServerGroupName":{ "shape":"GameServerGroupName", - "documentation":"

    An identifier for the new game server group. This value is used to generate unique ARN identifiers for the Amazon EC2 Auto Scaling group and the Amazon GameLift FleetIQ game server group. The name must be unique per Region per Amazon Web Services account.

    " + "documentation":"

    An identifier for the new game server group. This value is used to generate unique ARN identifiers for the Amazon EC2 Auto Scaling group and the Amazon GameLift Servers FleetIQ game server group. The name must be unique per Region per Amazon Web Services account.

    " }, "RoleArn":{ "shape":"IamRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your Amazon EC2 Auto Scaling groups.

    " + "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift Servers to access your Amazon EC2 Auto Scaling groups.

    " }, "MinSize":{ "shape":"WholeNumber", - "documentation":"

    The minimum number of instances allowed in the Amazon EC2 Auto Scaling group. During automatic scaling events, Amazon GameLift FleetIQ and Amazon EC2 do not scale down the group below this minimum. In production, this value should be set to at least 1. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    " + "documentation":"

    The minimum number of instances allowed in the Amazon EC2 Auto Scaling group. During automatic scaling events, Amazon GameLift Servers FleetIQ and Amazon EC2 do not scale down the group below this minimum. In production, this value should be set to at least 1. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    " }, "MaxSize":{ "shape":"PositiveInteger", - "documentation":"

    The maximum number of instances allowed in the Amazon EC2 Auto Scaling group. During automatic scaling events, Amazon GameLift FleetIQ and EC2 do not scale up the group above this maximum. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    " + "documentation":"

    The maximum number of instances allowed in the Amazon EC2 Auto Scaling group. During automatic scaling events, Amazon GameLift Servers FleetIQ and EC2 do not scale up the group above this maximum. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    " }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

    The Amazon EC2 launch template that contains configuration settings and game server code to be deployed to all instances in the game server group. You can specify the template using either the template name or ID. For help with creating a launch template, see Creating a Launch Template for an Auto Scaling Group in the Amazon Elastic Compute Cloud Auto Scaling User Guide. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    If you specify network interfaces in your launch template, you must explicitly set the property AssociatePublicIpAddress to \"true\". If no network interface is specified in the launch template, Amazon GameLift FleetIQ uses your account's default VPC.

    " + "documentation":"

    The Amazon EC2 launch template that contains configuration settings and game server code to be deployed to all instances in the game server group. You can specify the template using either the template name or ID. For help with creating a launch template, see Creating a Launch Template for an Auto Scaling Group in the Amazon Elastic Compute Cloud Auto Scaling User Guide. After the Auto Scaling group is created, update this value directly in the Auto Scaling group using the Amazon Web Services console or APIs.

    If you specify network interfaces in your launch template, you must explicitly set the property AssociatePublicIpAddress to \"true\". If no network interface is specified in the launch template, Amazon GameLift Servers FleetIQ uses your account's default VPC.

    " }, "InstanceDefinitions":{ "shape":"InstanceDefinitions", - "documentation":"

    The Amazon EC2 instance types and sizes to use in the Auto Scaling group. The instance definitions must specify at least two different instance types that are supported by Amazon GameLift FleetIQ. For more information on instance types, see EC2 Instance Types in the Amazon Elastic Compute Cloud User Guide. You can optionally specify capacity weighting for each instance type. If no weight value is specified for an instance type, it is set to the default value \"1\". For more information about capacity weighting, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

    " + "documentation":"

    The Amazon EC2 instance types and sizes to use in the Auto Scaling group. The instance definitions must specify at least two different instance types that are supported by Amazon GameLift Servers FleetIQ. For more information on instance types, see EC2 Instance Types in the Amazon Elastic Compute Cloud User Guide. You can optionally specify capacity weighting for each instance type. If no weight value is specified for an instance type, it is set to the default value \"1\". For more information about capacity weighting, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

    " }, "AutoScalingPolicy":{ "shape":"GameServerGroupAutoScalingPolicy", @@ -3340,7 +3339,7 @@ }, "BalancingStrategy":{ "shape":"BalancingStrategy", - "documentation":"

    Indicates how Amazon GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " + "documentation":"

    Indicates how Amazon GameLift Servers FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " }, "GameServerProtectionPolicy":{ "shape":"GameServerProtectionPolicy", @@ -3348,7 +3347,7 @@ }, "VpcSubnets":{ "shape":"VpcSubnets", - "documentation":"

    A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all Amazon GameLift FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly.

    " + "documentation":"

    A list of virtual private cloud (VPC) subnets to use with instances in the game server group. By default, all Amazon GameLift Servers FleetIQ-supported Availability Zones are used. You can use this parameter to specify VPCs that you've set up. This property cannot be updated after the game server group is created, and the corresponding Auto Scaling group will always use the property value that is set with this request, even if the Auto Scaling group is updated directly.

    " }, "Tags":{ "shape":"TagList", @@ -3361,7 +3360,7 @@ "members":{ "GameServerGroup":{ "shape":"GameServerGroup", - "documentation":"

    The newly created game server group object, including the new ARN value for the Amazon GameLift FleetIQ game server group and the object's status. The Amazon EC2 Auto Scaling group ARN is initially null, since the group has not yet been created. This value is added once the game server group status reaches ACTIVE.

    " + "documentation":"

    The newly created game server group object, including the new ARN value for the Amazon GameLift Servers FleetIQ game server group and the object's status. The Amazon EC2 Auto Scaling group ARN is initially null, since the group has not yet been created. This value is added once the game server group status reaches ACTIVE.

    " } } }, @@ -3391,7 +3390,7 @@ }, "CreatorId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a player or entity creating the game session.

    If you add a resource creation limit policy to a fleet, the CreateGameSession operation requires a CreatorId. Amazon GameLift limits the number of game session creation requests with the same CreatorId in a specified time period.

    If you your fleet doesn't have a resource creation limit policy and you provide a CreatorId in your CreateGameSession requests, Amazon GameLift limits requests to one request per CreatorId per second.

    To not limit CreateGameSession requests with the same CreatorId, don't provide a CreatorId in your CreateGameSession request.

    " + "documentation":"

    A unique identifier for a player or entity creating the game session.

    If you add a resource creation limit policy to a fleet, the CreateGameSession operation requires a CreatorId. Amazon GameLift Servers limits the number of game session creation requests with the same CreatorId in a specified time period.

    If you your fleet doesn't have a resource creation limit policy and you provide a CreatorId in your CreateGameSession requests, Amazon GameLift Servers limits requests to one request per CreatorId per second.

    To not limit CreateGameSession requests with the same CreatorId, don't provide a CreatorId in your CreateGameSession request.

    " }, "GameSessionId":{ "shape":"IdStringModel", @@ -3434,7 +3433,7 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.

    " + "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift Servers can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.

    " }, "Destinations":{ "shape":"GameSessionQueueDestinationList", @@ -3513,7 +3512,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift Servers-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

    " }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -3553,11 +3552,11 @@ }, "BackfillMode":{ "shape":"BackfillMode", - "documentation":"

    The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have Amazon GameLift create a backfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

    " + "documentation":"

    The method used to backfill game sessions that are created with this matchmaking configuration. Specify MANUAL when your game manages backfill requests manually or does not use the match backfill feature. Specify AUTOMATIC to have Amazon GameLift Servers create a backfill request whenever a game session has one or more open slots. Learn more about manual and automatic backfill in Backfill Existing Games with FlexMatch. Automatic backfill is not available when FlexMatchMode is set to STANDALONE.

    " }, "FlexMatchMode":{ "shape":"FlexMatchMode", - "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift queue to start a game session for the match.

    " + "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift Servers hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift Servers queue to start a game session for the match.

    " }, "Tags":{ "shape":"TagList", @@ -3622,7 +3621,7 @@ }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

    Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

    " + "documentation":"

    Developer-defined information related to a player. Amazon GameLift Servers does not use this data, so it can be formatted as needed for use in the game.

    " } } }, @@ -3652,7 +3651,7 @@ }, "PlayerDataMap":{ "shape":"PlayerDataMap", - "documentation":"

    Map of string pairs, each specifying a player ID and a set of developer-defined information related to the player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game. Any player data strings for player IDs that are not included in the PlayerIds parameter are ignored.

    " + "documentation":"

    Map of string pairs, each specifying a player ID and a set of developer-defined information related to the player. Amazon GameLift Servers does not use this data, so it can be formatted as needed for use in the game. Any player data strings for player IDs that are not included in the PlayerIds parameter are ignored.

    " } } }, @@ -3678,7 +3677,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " + "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift Servers to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift Servers uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " }, "ZipFile":{ "shape":"ZipBlob", @@ -3695,7 +3694,7 @@ "members":{ "Script":{ "shape":"Script", - "documentation":"

    The newly created script record with a unique script ID and ARN. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controls by the Amazon GameLift service.

    " + "documentation":"

    The newly created script record with a unique script ID and ARN. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controlled by the Amazon GameLift Servers service.

    " } } }, @@ -3708,11 +3707,11 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " + "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift Servers fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " } } }, @@ -3735,22 +3734,21 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet. You can use either the fleet ID or ARN value. This tells Amazon GameLift which GameLift VPC to peer with.

    " + "documentation":"

    A unique identifier for the fleet. You can use either the fleet ID or ARN value. This tells Amazon GameLift Servers which GameLift VPC to peer with.

    " }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the Amazon Web Services account with the VPC that you want to peer your Amazon GameLift fleet with. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " + "documentation":"

    A unique identifier for the Amazon Web Services account with the VPC that you want to peer your Amazon GameLift Servers fleet with. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " } } }, "CreateVpcPeeringConnectionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CustomEventData":{ "type":"string", @@ -3801,8 +3799,7 @@ }, "DeleteContainerFleetOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteContainerGroupDefinitionInput":{ "type":"structure", @@ -3824,8 +3821,7 @@ }, "DeleteContainerGroupDefinitionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFleetInput":{ "type":"structure", @@ -3863,7 +3859,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "LocationStates":{ "shape":"LocationStateList", @@ -3906,8 +3902,7 @@ }, "DeleteGameSessionQueueOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLocationInput":{ "type":"structure", @@ -3921,8 +3916,7 @@ }, "DeleteLocationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteMatchmakingConfigurationInput":{ "type":"structure", @@ -3936,8 +3930,7 @@ }, "DeleteMatchmakingConfigurationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteMatchmakingRuleSetInput":{ "type":"structure", @@ -3951,8 +3944,7 @@ }, "DeleteMatchmakingRuleSetOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteScalingPolicyInput":{ "type":"structure", @@ -3990,18 +3982,17 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " + "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift Servers fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " } } }, "DeleteVpcPeeringAuthorizationOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteVpcPeeringConnectionInput":{ "type":"structure", @@ -4022,8 +4013,7 @@ }, "DeleteVpcPeeringConnectionOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeploymentConfiguration":{ "type":"structure", @@ -4104,8 +4094,7 @@ }, "DeregisterComputeOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterGameServerInput":{ "type":"structure", @@ -4235,7 +4224,7 @@ "members":{ "EC2InstanceType":{ "shape":"EC2InstanceType", - "documentation":"

    Name of an Amazon EC2 instance type that is supported in Amazon GameLift. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Do not specify a value for this parameter to retrieve limits for all instance types.

    " + "documentation":"

    Name of an Amazon EC2 instance type that is supported in Amazon GameLift Servers. A fleet instance type determines the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. Do not specify a value for this parameter to retrieve limits for all instance types.

    " }, "Location":{ "shape":"LocationStringModel", @@ -4409,7 +4398,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "LocationAttributes":{ "shape":"LocationAttributesList", @@ -4496,7 +4485,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "InboundPermissions":{ "shape":"IpPermissionsList", @@ -5007,8 +4996,7 @@ }, "DescribeVpcPeeringAuthorizationsInput":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeVpcPeeringAuthorizationsOutput":{ "type":"structure", @@ -5046,7 +5034,7 @@ }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

    Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

    " + "documentation":"

    Developer-defined information related to a player. Amazon GameLift Servers does not use this data, so it can be formatted as needed for use in the game.

    " } }, "documentation":"

    Player information for use when creating player sessions using a game session placement request.

    " @@ -5069,7 +5057,7 @@ "members":{ "DESIRED":{ "shape":"WholeNumber", - "documentation":"

    Requested number of active instances. Amazon GameLift takes action as needed to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances. A change in the desired instances value can take up to 1 minute to be reflected when viewing a fleet's capacity settings.

    " + "documentation":"

    Requested number of active instances. Amazon GameLift Servers takes action as needed to maintain the desired number of instances. Capacity is scaled up or down by changing the desired instances. A change in the desired instances value can take up to 1 minute to be reflected when viewing a fleet's capacity settings.

    " }, "MINIMUM":{ "shape":"WholeNumber", @@ -5118,7 +5106,7 @@ "documentation":"

    An Amazon Web Services Region code, such as us-west-2.

    " } }, - "documentation":"

    The Amazon GameLift service limits for an Amazon EC2 instance type and current utilization. Amazon GameLift allows Amazon Web Services accounts a maximum number of instances, per instance type, per Amazon Web Services Region or location, for use with Amazon GameLift. You can request an limit increase for your account by using the Service limits page in the Amazon GameLift console.

    " + "documentation":"

    The Amazon GameLift Servers service limits for an Amazon EC2 instance type and current utilization. Amazon GameLift Servers allows Amazon Web Services accounts a maximum number of instances, per instance type, per Amazon Web Services Region or location, for use with Amazon GameLift Servers. You can request a limit increase for your account by using the Service limits page in the Amazon GameLift Servers console.

    " }, "EC2InstanceLimitList":{ "type":"list", @@ -5647,7 +5635,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift is downloading the compressed build and running install scripts.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift has successfully installed build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift has successfully verified the build files and is now launching a fleet instance.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift service.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLiftis now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon.

    • FLEET_CREATION_FAILED_INSTALLER -- The installed failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift could start validation.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the GameLift is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time. This event is not emitted for managed container fleets and Anywhere fleets unless they're deployed with the Amazon GameLift Agent.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by GameLift. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " + "documentation":"

    The type of event being logged.

    Fleet state transition events:

    • FLEET_CREATED -- A fleet resource was successfully created with a status of NEW. Event messaging includes the fleet ID.

    • FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. Amazon GameLift Servers is downloading the compressed build and running install scripts.

    • FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. Amazon GameLift Servers has successfully installed build and is now validating the build files.

    • FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING to BUILDING. Amazon GameLift Servers has successfully verified the build files and is now launching a fleet instance.

    • FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. Amazon GameLift Servers is launching a game server process on the fleet instance and is testing its connectivity with the Amazon GameLift Servers service.

    • FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The fleet is now ready to host game sessions.

    • FLEET_STATE_ERROR -- The Fleet's status changed to ERROR. Describe the fleet event message for more details.

    Fleet creation events (ordered by fleet creation activity):

    • FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.

    • FLEET_CREATION_EXTRACTING_BUILD -- The game server build was successfully downloaded to an instance, and Amazon GameLift Servers is now extracting the build files from the uploaded build. Failure at this stage prevents a fleet from moving to ACTIVE status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_RUNNING_INSTALLER -- The game server build files were successfully extracted, and Amazon GameLift Servers is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_CREATION_COMPLETED_INSTALLER -- The game server build files were successfully installed and validation of the installation will begin soon.

    • FLEET_CREATION_FAILED_INSTALLER -- The installer failed while attempting to install the build files. This event indicates that the failure occurred before Amazon GameLift Servers could start validation.

    • FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and Amazon GameLift Servers is now verifying that the game server launch paths, which are specified in the fleet's runtime configuration, exist. If any listed launch path exists, Amazon GameLift Servers tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE status. Logs for this stage list the launch paths in the runtime configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.

    • FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime configuration failed because the executable specified in a launch path does not exist on the instance.

    • FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the runtime configuration failed because the executable specified in a launch path failed to run on the fleet instance.

    • FLEET_VALIDATION_TIMED_OUT -- Validation of the fleet at the end of creation timed out. Try fleet creation again.

    • FLEET_ACTIVATION_FAILED -- The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. For more information, see Debug Fleet Creation Issues.

    • FLEET_ACTIVATION_FAILED_NO_INSTANCES -- Fleet creation was not able to obtain any instances based on the input fleet attributes. Try again at a different time or choose a different combination of fleet attributes such as fleet type, instance type, etc.

    • FLEET_INITIALIZATION_FAILED -- A generic exception occurred during fleet creation. Describe the fleet event message for more details.

    VPC peering events:

    • FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift Servers fleet and a VPC in your Amazon Web Services account.

    • FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your Amazon Web Services account. For more information on VPC peering failures, see https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html

    • FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.

    Spot instance events:

    • INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.

    • INSTANCE_RECYCLED -- A spot instance was determined to have a high risk of interruption and is scheduled to be recycled once it has no active game sessions.

    Server process events:

    • SERVER_PROCESS_INVALID_PATH -- The game server executable or script could not be found based on the Fleet runtime configuration. Check that the launch path is correct based on the operating system of the Fleet.

    • SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT -- The server process did not call InitSDK() within the time expected (5 minutes). Check your game session log to see why InitSDK() was not called in time. This event is not emitted for managed container fleets and Anywhere fleets unless they're deployed with the Amazon GameLift Servers Agent.

    • SERVER_PROCESS_PROCESS_READY_TIMEOUT -- The server process did not call ProcessReady() within the time expected (5 minutes) after calling InitSDK(). Check your game session log to see why ProcessReady() was not called in time.

    • SERVER_PROCESS_CRASHED -- The server process exited without calling ProcessEnding(). Check your game session log to see why ProcessEnding() was not called.

    • SERVER_PROCESS_TERMINATED_UNHEALTHY -- The server process did not report a valid health check for too long and was therefore terminated by Amazon GameLift Servers. Check your game session log to see if the thread became stuck processing a synchronous task for too long.

    • SERVER_PROCESS_FORCE_TERMINATED -- The server process did not exit cleanly within the time expected after OnProcessTerminate() was sent. Check your game session log to see why termination took longer than expected.

    • SERVER_PROCESS_PROCESS_EXIT_TIMEOUT -- The server process did not exit cleanly within the time expected (30 seconds) after calling ProcessEnding(). Check your game session log to see why termination took longer than expected.

    Game session events:

    • GAME_SESSION_ACTIVATION_TIMEOUT -- GameSession failed to activate within the expected time. Check your game session log to see why ActivateGameSession() took longer to complete than expected.

    Other fleet events:

    • FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.

    • FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.

    • FLEET_DELETED -- A request to delete a fleet was initiated.

    • GENERIC_EVENT -- An unspecified event has occurred.

    " }, "Message":{ "shape":"NonEmptyString", @@ -5659,14 +5647,14 @@ }, "PreSignedLogUrl":{ "shape":"NonZeroAndMaxString", - "documentation":"

    Location of stored logs with additional detail that is related to the event. This is useful for debugging issues. The URL is valid for 15 minutes. You can also access fleet creation logs through the Amazon GameLift console.

    " + "documentation":"

    Location of stored logs with additional detail that is related to the event. This is useful for debugging issues. The URL is valid for 15 minutes. You can also access fleet creation logs through the Amazon GameLift Servers console.

    " }, "Count":{ "shape":"EventCount", "documentation":"

    The number of times that this event occurred.

    " } }, - "documentation":"

    Log entry describing an event that involves Amazon GameLift resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.

    " + "documentation":"

    Log entry describing an event that involves Amazon GameLift Servers resources (such as a fleet). In addition to tracking activity, event codes and messages can provide additional information for troubleshooting and debugging problems.

    " }, "EventCode":{ "type":"string", @@ -5784,7 +5772,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift fleet ARN, the resource ID matches the FleetId value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912. In a GameLift fleet ARN, the resource ID matches the FleetId value.

    " }, "FleetType":{ "shape":"FleetType", @@ -5812,7 +5800,7 @@ }, "Status":{ "shape":"FleetStatus", - "documentation":"

    Current status of the fleet. Possible fleet statuses include the following:

    • NEW -- A new fleet resource has been defined and Amazon GameLift has started creating the fleet. Desired instances is set to 1.

    • DOWNLOADING/VALIDATING/BUILDING -- Amazon GameLift is download the game server build, running install scripts, and then validating the build files. When complete, Amazon GameLift launches a fleet instance.

    • ACTIVATING -- Amazon GameLift is launching a game server process and testing its connectivity with the Amazon GameLift service.

    • ACTIVE -- The fleet is now ready to host game sessions.

    • ERROR -- An error occurred when downloading, validating, building, or activating the fleet.

    • DELETING -- Hosts are responding to a delete fleet request.

    • TERMINATED -- The fleet no longer exists.

    " + "documentation":"

    Current status of the fleet. Possible fleet statuses include the following:

    • NEW -- A new fleet resource has been defined and Amazon GameLift Servers has started creating the fleet. Desired instances is set to 1.

    • DOWNLOADING/VALIDATING/BUILDING -- Amazon GameLift Servers is downloading the game server build, running install scripts, and then validating the build files. When complete, Amazon GameLift Servers launches a fleet instance.

    • ACTIVATING -- Amazon GameLift Servers is launching a game server process and testing its connectivity with the Amazon GameLift Servers service.

    • ACTIVE -- The fleet is now ready to host game sessions.

    • ERROR -- An error occurred when downloading, validating, building, or activating the fleet.

    • DELETING -- Hosts are responding to a delete fleet request.

    • TERMINATED -- The fleet no longer exists.

    " }, "BuildId":{ "shape":"BuildId", @@ -5820,7 +5808,7 @@ }, "BuildArn":{ "shape":"BuildArn", - "documentation":"

    The Amazon Resource Name (ARN) associated with the Amazon GameLift build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value. This attribute is used with fleets where ComputeType is \"EC2\".

    " + "documentation":"

    The Amazon Resource Name (ARN) associated with the Amazon GameLift Servers build resource that is deployed on instances in this fleet. In a GameLift build ARN, the resource ID matches the BuildId value. This attribute is used with fleets where ComputeType is \"EC2\".

    " }, "ScriptId":{ "shape":"ScriptId", @@ -5840,7 +5828,7 @@ }, "LogPaths":{ "shape":"StringList", - "documentation":"

    This parameter is no longer used. Game session log paths are now defined using the Amazon GameLift server API ProcessReady() logParameters. See more information in the Server API Reference.

    " + "documentation":"

    This parameter is no longer used. Game session log paths are now defined using the Amazon GameLift Servers server API ProcessReady() logParameters. See more information in the Server API Reference.

    " }, "NewGameSessionProtectionPolicy":{ "shape":"ProtectionPolicy", @@ -5848,7 +5836,7 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    The operating system of the fleet's computing resources. A fleet's operating system is determined by the OS of the build or script that is deployed on this fleet. This attribute is used with fleets where ComputeType is EC2.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The operating system of the fleet's computing resources. A fleet's operating system is determined by the OS of the build or script that is deployed on this fleet. This attribute is used with fleets where ComputeType is EC2.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "ResourceCreationLimitPolicy":{"shape":"ResourceCreationLimitPolicy"}, "MetricGroups":{ @@ -5869,7 +5857,7 @@ }, "ComputeType":{ "shape":"ComputeType", - "documentation":"

    The type of compute resource used to host your game servers. You can use your own compute resources with Amazon GameLift Anywhere or use Amazon EC2 instances with managed Amazon GameLift.

    " + "documentation":"

    The type of compute resource used to host your game servers. You can use your own compute resources with Amazon GameLift Servers Anywhere or use Amazon EC2 instances with managed Amazon GameLift Servers.

    " }, "AnywhereConfiguration":{ "shape":"AnywhereConfiguration", @@ -5880,7 +5868,7 @@ "documentation":"

    Indicates that fleet instances maintain a shared credentials file for the IAM role defined in InstanceRoleArn. Shared credentials allow applications that are deployed with the game server executable to communicate with other Amazon Web Services resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets. This attribute is used with fleets where ComputeType is EC2.

    " } }, - "documentation":"

    Describes an Amazon GameLift fleet of game hosting resources. Attributes differ based on the fleet's compute type, as follows:

    • EC2 fleet attributes identify a Build resource (for fleets with customer game server builds) or a Script resource (for Amazon GameLift Realtime fleets).

    • Amazon GameLift Anywhere fleets have an abbreviated set of attributes, because most fleet configurations are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive properties, creation/termination time, and fleet status.

    Returned by: https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetAttributes

    " + "documentation":"

    Describes an Amazon GameLift Servers fleet of game hosting resources. Attributes differ based on the fleet's compute type, as follows:

    • EC2 fleet attributes identify a Build resource (for fleets with customer game server builds) or a Script resource (for Amazon GameLift Servers Realtime fleets).

    • Amazon GameLift Servers Anywhere fleets have an abbreviated set of attributes, because most fleet configurations are set directly on the fleet's computes. Attributes include fleet identifiers and descriptive properties, creation/termination time, and fleet status.

    Returned by: https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetAttributes

    " }, "FleetAttributesList":{ "type":"list", @@ -5901,7 +5889,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "InstanceType":{ "shape":"EC2InstanceType", @@ -5951,7 +5939,7 @@ }, "RollbackGameServerBinaryArn":{ "shape":"FleetBinaryArn", - "documentation":"

    The unique identifier for the version of the game server container group definition to roll back to if deployment fails. Amazon GameLift sets this property to the container group definition version that the fleet used when it was last active.

    " + "documentation":"

    The unique identifier for the version of the game server container group definition to roll back to if deployment fails. Amazon GameLift Servers sets this property to the container group definition version that the fleet used when it was last active.

    " }, "PerInstanceBinaryArn":{ "shape":"FleetBinaryArn", @@ -5959,7 +5947,7 @@ }, "RollbackPerInstanceBinaryArn":{ "shape":"FleetBinaryArn", - "documentation":"

    The unique identifier for the version of the per-instance container group definition to roll back to if deployment fails. Amazon GameLift sets this property to the container group definition version that the fleet used when it was last active.

    " + "documentation":"

    The unique identifier for the version of the per-instance container group definition to roll back to if deployment fails. Amazon GameLift Servers sets this property to the container group definition version that the fleet used when it was last active.

    " }, "DeploymentStatus":{ "shape":"DeploymentStatus", @@ -6033,7 +6021,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "ActiveServerProcessCount":{ "shape":"WholeNumber", @@ -6160,7 +6148,7 @@ "documentation":"

    Timestamp that indicates the last time the game server was updated with health status. The format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\"). After game server registration, this property is only changed when a game server update specifies a health check value.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Properties describing a game server that is running on an instance in a game server group.

    A game server is created by a successful call to RegisterGameServer and deleted by calling DeregisterGameServer. A game server is claimed to host a game session by calling ClaimGameServer.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Properties describing a game server that is running on an instance in a game server group.

    A game server is created by a successful call to RegisterGameServer and deleted by calling DeregisterGameServer. A game server is claimed to host a game session by calling ClaimGameServer.

    " }, "GameServerClaimStatus":{ "type":"string", @@ -6193,11 +6181,11 @@ }, "ImageUri":{ "shape":"ImageUriString", - "documentation":"

    The URI to the image that Amazon GameLift uses when deploying this container to a container fleet. For a more specific identifier, see ResolvedImageDigest.

    " + "documentation":"

    The URI to the image that Amazon GameLift Servers uses when deploying this container to a container fleet. For a more specific identifier, see ResolvedImageDigest.

    " }, "PortConfiguration":{ "shape":"ContainerPortConfiguration", - "documentation":"

    The set of ports that are available to bind to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's ConnectionPortRange.

    " + "documentation":"

    The set of ports that are available to bind to processes in the container. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Amazon GameLift Servers maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's ConnectionPortRange.

    " }, "ResolvedImageDigest":{ "shape":"Sha256", @@ -6205,7 +6193,7 @@ }, "ServerSdkVersion":{ "shape":"ServerSdkVersion", - "documentation":"

    The Amazon GameLift server SDK version that the game server is integrated with. Only game servers using 5.2.0 or higher are compatible with container fleets.

    " + "documentation":"

    The Amazon GameLift Servers server SDK version that the game server is integrated with. Only game servers using 5.2.0 or higher are compatible with container fleets.

    " } }, "documentation":"

    Describes the game server container in an existing game server container group. A game server container identifies a container image with your game server build. A game server container is automatically considered essential; if an essential container fails, the entire container group restarts.

    You can update a container definition and deploy the updates to an existing fleet. When creating or updating a game server container group definition, use the property https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameServerContainerDefinitionInput.

    Part of: ContainerGroupDefinition

    Returned by: DescribeContainerGroupDefinition, ListContainerGroupDefinitions, UpdateContainerGroupDefinition

    " @@ -6237,15 +6225,15 @@ }, "ImageUri":{ "shape":"ImageUriString", - "documentation":"

    The location of the container image to deploy to a container fleet. Provide an image in an Amazon Elastic Container Registry public or private repository. The repository must be in the same Amazon Web Services account and Amazon Web Services Region where you're creating the container group definition. For limits on image size, see Amazon GameLift endpoints and quotas. You can use any of the following image URI formats:

    • Image ID only: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]

    • Image ID and digest: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]@[digest]

    • Image ID and tag: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]:[tag]

    " + "documentation":"

    The location of the container image to deploy to a container fleet. Provide an image in an Amazon Elastic Container Registry public or private repository. The repository must be in the same Amazon Web Services account and Amazon Web Services Region where you're creating the container group definition. For limits on image size, see Amazon GameLift Servers endpoints and quotas. You can use any of the following image URI formats:

    • Image ID only: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]

    • Image ID and digest: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]@[digest]

    • Image ID and tag: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]:[tag]

    " }, "PortConfiguration":{ "shape":"ContainerPortConfiguration", - "documentation":"

    A set of ports that Amazon GameLift can assign to processes in the container. Processes, must be assigned a container port to accept inbound traffic connections. For example, a game server process requires a container port to allow game clients to connect to it. Container ports aren't directly accessed by inbound traffic. Instead, Amazon GameLift maps container ports to externally accessible connection ports (see the container fleet property ConnectionPortRange).

    " + "documentation":"

    A set of ports that Amazon GameLift Servers can assign to processes in a container. The container port configuration must have enough ports for each container process that accepts inbound traffic connections. For example, a game server process requires a container port to allow game clients to connect to it. A container port configuration can have one or more container port ranges. Each range specifies starting and ending values as well as the supported network protocol.

    Container ports aren't directly accessed by inbound traffic. Amazon GameLift Servers maps each container port to an externally accessible connection port (see the container fleet property ConnectionPortRange).

    " }, "ServerSdkVersion":{ "shape":"ServerSdkVersion", - "documentation":"

    The Amazon GameLift server SDK version that the game server is integrated with. Only game servers using 5.2.0 or higher are compatible with container fleets.

    " + "documentation":"

    The Amazon GameLift Servers server SDK version that the game server is integrated with. Only game servers using 5.2.0 or higher are compatible with container fleets.

    " } }, "documentation":"

    Describes the configuration for a container that runs your game server executable. This definition includes container configuration, resources, and start instructions. Use this data type when creating or updating a game server container group definition. For properties of a deployed container, see GameServerContainerDefinition. A game server container is automatically considered essential; if an essential container fails, the entire container group restarts.

    Use with: CreateContainerGroupDefinition, UpdateContainerGroupDefinition

    " @@ -6296,15 +6284,15 @@ }, "RoleArn":{ "shape":"IamRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your Amazon EC2 Auto Scaling groups.

    " + "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift Servers to access your Amazon EC2 Auto Scaling groups.

    " }, "InstanceDefinitions":{ "shape":"InstanceDefinitions", - "documentation":"

    The set of Amazon EC2 instance types that Amazon GameLift FleetIQ can use when balancing and automatically scaling instances in the corresponding Auto Scaling group.

    " + "documentation":"

    The set of Amazon EC2 instance types that Amazon GameLift Servers FleetIQ can use when balancing and automatically scaling instances in the corresponding Auto Scaling group.

    " }, "BalancingStrategy":{ "shape":"BalancingStrategy", - "documentation":"

    Indicates how Amazon GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " + "documentation":"

    Indicates how Amazon GameLift Servers FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " }, "GameServerProtectionPolicy":{ "shape":"GameServerProtectionPolicy", @@ -6316,7 +6304,7 @@ }, "Status":{ "shape":"GameServerGroupStatus", - "documentation":"

    The current status of the game server group. Possible statuses include:

    • NEW - Amazon GameLift FleetIQ has validated the CreateGameServerGroup() request.

    • ACTIVATING - Amazon GameLift FleetIQ is setting up a game server group, which includes creating an Auto Scaling group in your Amazon Web Services account.

    • ACTIVE - The game server group has been successfully created.

    • DELETE_SCHEDULED - A request to delete the game server group has been received.

    • DELETING - Amazon GameLift FleetIQ has received a valid DeleteGameServerGroup() request and is processing it. Amazon GameLift FleetIQ must first complete and release hosts before it deletes the Auto Scaling group and the game server group.

    • DELETED - The game server group has been successfully deleted.

    • ERROR - The asynchronous processes of activating or deleting a game server group has failed, resulting in an error state.

    " + "documentation":"

    The current status of the game server group. Possible statuses include:

    • NEW - Amazon GameLift Servers FleetIQ has validated the CreateGameServerGroup() request.

    • ACTIVATING - Amazon GameLift Servers FleetIQ is setting up a game server group, which includes creating an Auto Scaling group in your Amazon Web Services account.

    • ACTIVE - The game server group has been successfully created.

    • DELETE_SCHEDULED - A request to delete the game server group has been received.

    • DELETING - Amazon GameLift Servers FleetIQ has received a valid DeleteGameServerGroup() request and is processing it. Amazon GameLift Servers FleetIQ must first complete and release hosts before it deletes the Auto Scaling group and the game server group.

    • DELETED - The game server group has been successfully deleted.

    • ERROR - The asynchronous processes of activating or deleting a game server group has failed, resulting in an error state.

    " }, "StatusReason":{ "shape":"NonZeroAndMaxString", @@ -6335,7 +6323,7 @@ "documentation":"

    A timestamp that indicates when this game server group was last updated.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Properties that describe a game server group resource. A game server group manages certain properties related to a corresponding Amazon EC2 Auto Scaling group.

    A game server group is created by a successful call to CreateGameServerGroup and deleted by calling DeleteGameServerGroup. Game server group activity can be temporarily suspended and resumed by calling SuspendGameServerGroup and ResumeGameServerGroup, respectively.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Properties that describe a game server group resource. A game server group manages certain properties related to a corresponding Amazon EC2 Auto Scaling group.

    A game server group is created by a successful call to CreateGameServerGroup and deleted by calling DeleteGameServerGroup. Game server group activity can be temporarily suspended and resumed by calling SuspendGameServerGroup and ResumeGameServerGroup, respectively.

    " }, "GameServerGroupAction":{ "type":"string", @@ -6359,14 +6347,14 @@ "members":{ "EstimatedInstanceWarmup":{ "shape":"PositiveInteger", - "documentation":"

    Length of time, in seconds, it takes for a new instance to start new game server processes and register with Amazon GameLift FleetIQ. Specifying a warm-up time can be useful, particularly with game servers that take a long time to start up, because it avoids prematurely starting new instances.

    " + "documentation":"

    Length of time, in seconds, it takes for a new instance to start new game server processes and register with Amazon GameLift Servers FleetIQ. Specifying a warm-up time can be useful, particularly with game servers that take a long time to start up, because it avoids prematurely starting new instances.

    " }, "TargetTrackingConfiguration":{ "shape":"TargetTrackingConfiguration", - "documentation":"

    Settings for a target-based scaling policy applied to Auto Scaling group. These settings are used to create a target-based policy that tracks the Amazon GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

    " + "documentation":"

    Settings for a target-based scaling policy applied to Auto Scaling group. These settings are used to create a target-based policy that tracks the Amazon GameLift Servers FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Configuration settings for intelligent automatic scaling that uses target tracking. These settings are used to add an Auto Scaling policy when creating the corresponding Auto Scaling group. After the Auto Scaling group is created, all updates to Auto Scaling policies, including changing this policy and adding or removing other policies, is done directly on the Auto Scaling group.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Configuration settings for intelligent automatic scaling that uses target tracking. These settings are used to add an Auto Scaling policy when creating the corresponding Auto Scaling group. After the Auto Scaling group is created, all updates to Auto Scaling policies, including changing this policy and adding or removing other policies, is done directly on the Auto Scaling group.

    " }, "GameServerGroupDeleteOption":{ "type":"string", @@ -6527,7 +6515,7 @@ "documentation":"

    Current status of the game server instance

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Additional properties, including status, that describe an EC2 instance in a game server group. Instance configurations are set with game server group properties (see DescribeGameServerGroup and with the EC2 launch template that was used when creating the game server group.

    Retrieve game server instances for a game server group by calling DescribeGameServerInstances.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Additional properties, including status, that describe an EC2 instance in a game server group. Instance configurations are set with game server group properties (see DescribeGameServerGroup and with the EC2 launch template that was used when creating the game server group.

    Retrieve game server instances for a game server group by calling DescribeGameServerInstances.

    " }, "GameServerInstanceId":{ "type":"string", @@ -6620,7 +6608,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number.

    " }, "DnsName":{ "shape":"DnsName", @@ -6628,7 +6616,7 @@ }, "Port":{ "shape":"PortNumber", - "documentation":"

    The port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

    " + "documentation":"

    The port number for the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number.

    " }, "PlayerSessionCreationPolicy":{ "shape":"PlayerSessionCreationPolicy", @@ -6651,7 +6639,7 @@ "documentation":"

    The fleet location where the game session is running. This value might specify the fleet's home Region or a remote location. Location is expressed as an Amazon Web Services Region code such as us-west-2.

    " } }, - "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Amazon GameLift retains a game session resource for 30 days after the game session ends. You can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " + "documentation":"

    Properties describing a game session.

    A game session in ACTIVE status can host players. When a game session ends, its status is set to TERMINATED.

    Amazon GameLift Servers retains a game session resource for 30 days after the game session ends. You can reuse idempotency token values after this time. Game session logs are retained for 14 days.

    All APIs by task

    " }, "GameSessionActivationTimeoutSeconds":{ "type":"integer", @@ -6667,7 +6655,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number.

    " }, "DnsName":{ "shape":"DnsName", @@ -6675,7 +6663,7 @@ }, "Port":{ "shape":"PositiveInteger", - "documentation":"

    The port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

    " + "documentation":"

    The port number for the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number.

    " }, "MatchedPlayerSessions":{ "shape":"MatchedPlayerSessionList", @@ -6689,14 +6677,14 @@ "members":{ "NewGameSessionsPerCreator":{ "shape":"WholeNumber", - "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy evaluates when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " + "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy evaluates when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift Servers checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " }, "PolicyPeriodInMinutes":{ "shape":"WholeNumber", "documentation":"

    The time span used in evaluating the resource creation limit policy.

    " } }, - "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " + "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift Servers checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " }, "GameSessionData":{ "type":"string", @@ -6746,7 +6734,7 @@ }, "Status":{ "shape":"GameSessionPlacementState", - "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.

    • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " + "documentation":"

    Current status of the game session placement request.

    • PENDING -- The placement request is in the queue waiting to be processed. Game session properties are not yet final.

    • FULFILLED -- A new game session has been successfully placed. Game session properties are now final.

    • CANCELLED -- The placement request was canceled.

    • TIMED_OUT -- A new game session was not successfully created before the time limit expired. You can resubmit the placement request as needed.

    • FAILED -- Amazon GameLift Servers is not able to complete the process of placing the game session. Common reasons are the game session terminated before the placement process was completed, or an unexpected internal error.

    " }, "GameProperties":{ "shape":"GamePropertyList", @@ -6786,7 +6774,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "DnsName":{ "shape":"DnsName", @@ -6794,7 +6782,7 @@ }, "Port":{ "shape":"PortNumber", - "documentation":"

    The port number for the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " + "documentation":"

    The port number for the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number. This value isn't final until placement status is FULFILLED.

    " }, "PlacedPlayerSessions":{ "shape":"PlacedPlayerSessionList", @@ -6810,10 +6798,10 @@ }, "PriorityConfigurationOverride":{ "shape":"PriorityConfigurationOverride", - "documentation":"

    An alternative priority list of locations that's included with a game session placement request. When provided, the list overrides a queue's location order list for this game session placement request only. The list might include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). The fallback strategy tells Amazon GameLift what action to take (if any) in the event that it failed to place a new game session.

    " + "documentation":"

    An alternative priority list of locations that's included with a game session placement request. When provided, the list overrides a queue's location order list for this game session placement request only. The list might include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). The fallback strategy tells Amazon GameLift Servers what action to take (if any) in the event that it failed to place a new game session.

    " } }, - "documentation":"

    Represents a potential game session placement, including the full details of the original placement request and the current status.

    If the game session placement status is PENDING, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED. When the placement is in PENDING status, Amazon GameLift may attempt to place a game session multiple times before succeeding. With each attempt it creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameSession object and updates this placement object with the new game session properties.

    " + "documentation":"

    Represents a potential game session placement, including the full details of the original placement request and the current status.

    If the game session placement status is PENDING, the properties for game session ID/ARN, region, IP address/DNS, and port aren't final. A game session is not active and ready to accept players until placement status reaches FULFILLED. When the placement is in PENDING status, Amazon GameLift Servers may attempt to place a game session multiple times before succeeding. With each attempt it creates a https://docs.aws.amazon.com/gamelift/latest/apireference/API_GameSession object and updates this placement object with the new game session properties.

    " }, "GameSessionPlacementState":{ "type":"string", @@ -6834,7 +6822,7 @@ }, "GameSessionQueueArn":{ "shape":"GameSessionQueueArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. In a Amazon GameLift game session queue ARN, the resource ID matches the Name value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. In an Amazon GameLift Servers game session queue ARN, the resource ID matches the Name value.

    " }, "TimeoutInSeconds":{ "shape":"WholeNumber", @@ -6842,7 +6830,7 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.

    " + "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift Servers can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value.

    " }, "Destinations":{ "shape":"GameSessionQueueDestinationList", @@ -6951,7 +6939,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", @@ -6959,7 +6947,7 @@ }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "Credentials":{ "shape":"AwsCredentials", @@ -7001,7 +6989,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "ComputeName":{ "shape":"ComputeNameOrArn", @@ -7009,7 +6997,7 @@ }, "ComputeArn":{ "shape":"ComputeArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers compute resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::compute/compute-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "AuthToken":{ "shape":"ComputeAuthToken", @@ -7049,7 +7037,7 @@ "members":{ "FleetId":{ "shape":"FleetIdOrArn", - "documentation":"

    A unique identifier for the fleet that contains the instance you want to access. You can request access to instances in EC2 fleets with the following statuses: ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value.

    You can access fleets in ERROR status for a short period of time before Amazon GameLift deletes them.

    " + "documentation":"

    A unique identifier for the fleet that contains the instance you want to access. You can request access to instances in EC2 fleets with the following statuses: ACTIVATING, ACTIVE, or ERROR. Use either a fleet ID or an ARN value.

    You can access fleets in ERROR status for a short period of time before Amazon GameLift Servers deletes them.

    " }, "InstanceId":{ "shape":"InstanceId", @@ -7101,7 +7089,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "InstanceId":{ "shape":"InstanceId", @@ -7117,7 +7105,7 @@ }, "OperatingSystem":{ "shape":"OperatingSystem", - "documentation":"

    Operating system that is running on this EC2 instance.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    Operating system that is running on this EC2 instance.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " }, "Type":{ "shape":"EC2InstanceType", @@ -7125,7 +7113,7 @@ }, "Status":{ "shape":"InstanceStatus", - "documentation":"

    Current status of the instance. Possible statuses include the following:

    • PENDING -- The instance is in the process of being created and launching server processes as defined in the fleet's run-time configuration.

    • ACTIVE -- The instance has been successfully created and at least one server process has successfully launched and reported back to Amazon GameLift that it is ready to host a game session. The instance is now considered ready to host game sessions.

    • TERMINATING -- The instance is in the process of shutting down. This may happen to reduce capacity during a scaling down event or to recycle resources in the event of a problem.

    " + "documentation":"

    Current status of the instance. Possible statuses include the following:

    • PENDING -- The instance is in the process of being created and launching server processes as defined in the fleet's run-time configuration.

    • ACTIVE -- The instance has been successfully created and at least one server process has successfully launched and reported back to Amazon GameLift Servers that it is ready to host a game session. The instance is now considered ready to host game sessions.

    • TERMINATING -- The instance is in the process of shutting down. This may happen to reduce capacity during a scaling down event or to recycle resources in the event of a problem.

    " }, "CreationTime":{ "shape":"Timestamp", @@ -7136,7 +7124,7 @@ "documentation":"

    The fleet location of the instance, expressed as an Amazon Web Services Region code, such as us-west-2.

    " } }, - "documentation":"

    Represents a virtual computing instance that runs game server processes and hosts game sessions. In Amazon GameLift, one or more instances make up a managed EC2 fleet.

    " + "documentation":"

    Represents a virtual computing instance that runs game server processes and hosts game sessions. In Amazon GameLift Servers, one or more instances make up a managed EC2 fleet.

    " }, "InstanceAccess":{ "type":"structure", @@ -7176,7 +7164,7 @@ "documentation":"

    Secret string. For Windows instances, the secret is a password for use with Windows Remote Desktop. For Linux instances, it's a private key for use with SSH.

    " } }, - "documentation":"

    A set of credentials that allow remote access to an instance in an EC2 managed fleet. These credentials are returned in response to a call to https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetInstanceAccess, which requests access for instances that are running game servers with the Amazon GameLift server SDK version 4.x or earlier.

    ", + "documentation":"

    A set of credentials that allow remote access to an instance in an EC2 managed fleet. These credentials are returned in response to a call to https://docs.aws.amazon.com/gamelift/latest/apireference/API_GetInstanceAccess, which requests access for instances that are running game servers with the Amazon GameLift Servers server SDK version 4.x or earlier.

    ", "sensitive":true }, "InstanceDefinition":{ @@ -7189,10 +7177,10 @@ }, "WeightedCapacity":{ "shape":"WeightedCapacity", - "documentation":"

    Instance weighting that indicates how much this instance type contributes to the total capacity of a game server group. Instance weights are used by Amazon GameLift FleetIQ to calculate the instance type's cost per unit hour and better identify the most cost-effective options. For detailed information on weighting instance capacity, see Instance Weighting in the Amazon Elastic Compute Cloud Auto Scaling User Guide. Default value is \"1\".

    " + "documentation":"

    Instance weighting that indicates how much this instance type contributes to the total capacity of a game server group. Instance weights are used by Amazon GameLift Servers FleetIQ to calculate the instance type's cost per unit hour and better identify the most cost-effective options. For detailed information on weighting instance capacity, see Instance Weighting in the Amazon Elastic Compute Cloud Auto Scaling User Guide. Default value is \"1\".

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    An allowed instance type for a game server group. All game server groups must have at least two instance types defined for it. Amazon GameLift FleetIQ periodically evaluates each defined instance type for viability. It then updates the Auto Scaling group with the list of viable instance types.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    An allowed instance type for a game server group. All game server groups must have at least two instance types defined for it. Amazon GameLift Servers FleetIQ periodically evaluates each defined instance type for viability. It then updates the Auto Scaling group with the list of viable instance types.

    " }, "InstanceDefinitions":{ "type":"list", @@ -7293,7 +7281,7 @@ "documentation":"

    The network communication protocol used by the fleet.

    " } }, - "documentation":"

    A range of IP addresses and port settings that allow inbound traffic to connect to processes on an instance in a fleet. Processes are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges.

    For Amazon GameLift Realtime fleets, Amazon GameLift automatically opens two port ranges, one for TCP messaging and one for UDP.

    " + "documentation":"

    A range of IP addresses and port settings that allow inbound traffic to connect to processes on an instance in a fleet. Processes are assigned an IP address/port number combination, which must fall into the fleet's allowed ranges.

    For Amazon GameLift Servers Realtime fleets, Amazon GameLift Servers automatically opens two port ranges, one for TCP messaging and one for UDP.

    " }, "IpPermissionsList":{ "type":"list", @@ -7362,7 +7350,7 @@ "documentation":"

    The version of the Amazon EC2 launch template to use. If no version is specified, the default version will be used. With Amazon EC2, you can specify a default version for a launch template. If none is set, the default is the first version created.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    An Amazon Elastic Compute Cloud launch template that contains configuration settings and game server code to be deployed to all instances in a game server group. The launch template is specified when creating a new game server group.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    An Amazon Elastic Compute Cloud launch template that contains configuration settings and game server code to be deployed to all instances in a game server group. The launch template is specified when creating a new game server group.

    " }, "LaunchTemplateVersion":{ "type":"string", @@ -7452,7 +7440,7 @@ }, "Location":{ "shape":"LocationStringModel", - "documentation":"

    The name of a location to retrieve compute resources for. For an Amazon GameLift Anywhere fleet, use a custom location. For a managed fleet, provide a Amazon Web Services Region or Local Zone code (for example: us-west-2 or us-west-2-lax-1).

    " + "documentation":"

    The name of a location to retrieve compute resources for. For an Amazon GameLift Servers Anywhere fleet, use a custom location. For a managed fleet, provide an Amazon Web Services Region or Local Zone code (for example: us-west-2 or us-west-2-lax-1).

    " }, "ContainerGroupDefinitionName":{ "shape":"ContainerGroupDefinitionNameOrArn", @@ -7563,7 +7551,7 @@ "members":{ "ContainerGroupType":{ "shape":"ContainerGroupType", - "documentation":"

    The type of container group to retrieve. Container group type determines how Amazon GameLift deploys the container group on each fleet instance.

    " + "documentation":"

    The type of container group to retrieve. Container group type determines how Amazon GameLift Servers deploys the container group on each fleet instance.

    " }, "Limit":{ "shape":"ListContainerGroupDefinitionsLimit", @@ -7723,7 +7711,7 @@ "members":{ "Filters":{ "shape":"LocationFilterList", - "documentation":"

    Filters the list for AWS or CUSTOM locations.

    " + "documentation":"

    Filters the list for AWS or CUSTOM locations. Use this parameter to narrow down results to only Amazon Web Services-managed locations (Amazon EC2 or container) or only your custom locations (such as an Amazon GameLift Servers Anywhere fleet).

    " }, "Limit":{ "shape":"ListLocationsLimit", @@ -7745,7 +7733,7 @@ "members":{ "Locations":{ "shape":"LocationModelList", - "documentation":"

    A collection of locations.

    " + "documentation":"

    A collection of locations, including both Amazon Web Services and custom locations. Each location includes a name and ping beacon information that can be used to measure network latency between player devices and the location.

    " }, "NextToken":{ "shape":"NonZeroAndMaxString", @@ -7785,7 +7773,7 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift resource that you want to retrieve tags for. Amazon GameLift includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift Servers resource that you want to retrieve tags for. Amazon GameLift Servers includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " } } }, @@ -7832,7 +7820,7 @@ "members":{ "Location":{ "shape":"LocationStringModel", - "documentation":"

    An Amazon Web Services Region code, such as us-west-2. For a list of supported Regions and Local Zones, see Amazon GameLift service locations for managed hosting.

    " + "documentation":"

    An Amazon Web Services Region code, such as us-west-2. For a list of supported Regions and Local Zones, see Amazon GameLift Servers service locations for managed hosting.

    " } }, "documentation":"

    A remote location where a multi-location fleet can deploy game servers for game hosting.

    " @@ -7871,10 +7859,14 @@ }, "LocationArn":{ "shape":"LocationArnModel", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift location resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::location/location-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a custom location resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::location/location-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + }, + "PingBeacon":{ + "shape":"PingBeacon", + "documentation":"

    Information about the UDP ping beacon for this location.

    " } }, - "documentation":"

    Properties of a custom location for use in an Amazon GameLift Anywhere fleet. This data type is returned in response to a call to https://docs.aws.amazon.com/gamelift/latest/apireference/API_CreateLocation.

    " + "documentation":"

    Properties of a location, which can include its name, ARN (for custom locations), and ping beacon information.

    " }, "LocationModelList":{ "type":"list", @@ -7898,7 +7890,7 @@ "documentation":"

    The life-cycle status of a fleet location.

    " } }, - "documentation":"

    A fleet location and its life-cycle state. A location state object might be used to describe a fleet's remote location or home Region. Life-cycle state tracks the progress of launching the first instance in a new location and preparing it for game hosting, and then removing all instances and deleting the location from the fleet.

    • NEW -- A new fleet location has been defined and desired instances is set to 1.

    • DOWNLOADING/VALIDATING/BUILDING/ACTIVATING -- Amazon GameLift is setting up the new fleet location, creating new instances with the game build or Realtime script and starting server processes.

    • ACTIVE -- Hosts can now accept game sessions.

    • ERROR -- An error occurred when downloading, validating, building, or activating the fleet location.

    • DELETING -- Hosts are responding to a delete fleet location request.

    • TERMINATED -- The fleet location no longer exists.

    • NOT_FOUND -- The fleet location was not found. This could be because the custom location was removed or not created.

    " + "documentation":"

    A fleet location and its life-cycle state. A location state object might be used to describe a fleet's remote location or home Region. Life-cycle state tracks the progress of launching the first instance in a new location and preparing it for game hosting, and then removing all instances and deleting the location from the fleet.

    • NEW -- A new fleet location has been defined and desired instances is set to 1.

    • DOWNLOADING/VALIDATING/BUILDING/ACTIVATING -- Amazon GameLift Servers is setting up the new fleet location, creating new instances with the game build or Realtime script and starting server processes.

    • ACTIVE -- Hosts can now accept game sessions.

    • ERROR -- An error occurred when downloading, validating, building, or activating the fleet location.

    • DELETING -- Hosts are responding to a delete fleet location request.

    • TERMINATED -- The fleet location no longer exists.

    • NOT_FOUND -- The fleet location was not found. This could be because the custom location was removed or not created.

    " }, "LocationStateList":{ "type":"list", @@ -7945,7 +7937,7 @@ "documentation":"

    If log destination is CLOUDWATCH, logs are sent to the specified log group in Amazon CloudWatch.

    " } }, - "documentation":"

    A method for collecting container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs. You can select from the following methods:

    " + "documentation":"

    A method for collecting container logs for the fleet. Amazon GameLift Servers saves all standard output for each container in logs, including game session logs. You can select from the following methods:

    " }, "LogDestination":{ "type":"string", @@ -7998,7 +7990,7 @@ }, "ConfigurationArn":{ "shape":"MatchmakingConfigurationArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingconfiguration/<matchmaking configuration name>. In a Amazon GameLift configuration ARN, the resource ID matches the Name value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers matchmaking configuration resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingconfiguration/<matchmaking configuration name>. In an Amazon GameLift Servers configuration ARN, the resource ID matches the Name value.

    " }, "Description":{ "shape":"NonZeroAndMaxString", @@ -8006,7 +7998,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift-hosted game sessions for matches that are created with this matchmaking configuration. This property is not set when FlexMatchMode is set to STANDALONE.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift Servers-hosted game sessions for matches that are created with this matchmaking configuration. This property is not set when FlexMatchMode is set to STANDALONE.

    " }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -8058,7 +8050,7 @@ }, "FlexMatchMode":{ "shape":"FlexMatchMode", - "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift queue to start a game session for the match.

    " + "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift Servers hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift Servers queue to start a game session for the match.

    " } }, "documentation":"

    Guidelines for use with FlexMatch to match players into games. All matchmaking requests must specify a matchmaking configuration.

    " @@ -8119,7 +8111,7 @@ }, "RuleSetArn":{ "shape":"MatchmakingRuleSetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingruleset/<ruleset name>. In a GameLift rule set ARN, the resource ID matches the RuleSetName value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers matchmaking rule set resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::matchmakingruleset/<ruleset name>. In a GameLift rule set ARN, the resource ID matches the RuleSetName value.

    " }, "RuleSetBody":{ "shape":"RuleSetBody", @@ -8298,7 +8290,7 @@ "members":{ "Message":{"shape":"NonEmptyString"} }, - "documentation":"

    The operation failed because Amazon GameLift has not yet finished validating this compute. We recommend attempting 8 to 10 retries over 3 to 5 minutes with exponential backoffs and jitter.

    ", + "documentation":"

    The operation failed because Amazon GameLift Servers has not yet finished validating this compute. We recommend attempting 8 to 10 retries over 3 to 5 minutes with exponential backoffs and jitter.

    ", "exception":true }, "OperatingSystem":{ @@ -8319,6 +8311,16 @@ "documentation":"

    The specified game server group has no available game servers to fulfill a ClaimGameServer request. Clients can retry such requests immediately or after a waiting period.

    ", "exception":true }, + "PingBeacon":{ + "type":"structure", + "members":{ + "UDPEndpoint":{ + "shape":"UDPEndpoint", + "documentation":"

    The domain name and port of the UDP ping beacon.

    " + } + }, + "documentation":"

    Information about a UDP ping beacon that can be used to measure network latency between a player device and an Amazon GameLift Servers hosting location.

    " + }, "PlacedPlayerSession":{ "type":"structure", "members":{ @@ -8494,7 +8496,7 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the game session. To connect to a Amazon GameLift game server, an app needs both the IP address and port number.

    " + "documentation":"

    The IP address of the game session. To connect to an Amazon GameLift Servers game server, an app needs both the IP address and port number.

    " }, "DnsName":{ "shape":"DnsName", @@ -8502,11 +8504,11 @@ }, "Port":{ "shape":"PortNumber", - "documentation":"

    Port number for the game session. To connect to a Amazon GameLift server process, an app needs both the IP address and port number.

    " + "documentation":"

    Port number for the game session. To connect to an Amazon GameLift Servers server process, an app needs both the IP address and port number.

    " }, "PlayerData":{ "shape":"PlayerData", - "documentation":"

    Developer-defined information related to a player. Amazon GameLift does not use this data, so it can be formatted as needed for use in the game.

    " + "documentation":"

    Developer-defined information related to a player. Amazon GameLift Servers does not use this data, so it can be formatted as needed for use in the game.

    " } }, "documentation":"

    Represents a player session. Player sessions are created either for a specific game session, or as part of a game session placement or matchmaking request. A player session can represents a reserved player slot in a game session (when status is RESERVED) or actual player activity in a game session (when status is ACTIVE). A player session object, including player data, is automatically passed to a game session when the player connects to the game session and is validated. After the game session ends, player sessions information is retained for 30 days and then removed.

    Related actions

    All APIs by task

    " @@ -8561,14 +8563,14 @@ "members":{ "PriorityOrder":{ "shape":"PriorityTypeList", - "documentation":"

    A custom sequence to use when prioritizing where to place new game sessions. Each priority type is listed once.

    • LATENCY -- Amazon GameLift prioritizes locations where the average player latency is lowest. Player latency data is provided in each game session placement request.

    • COST -- Amazon GameLift prioritizes queue destinations with the lowest current hosting costs. Cost is evaluated based on the destination's location, instance type, and fleet type (Spot or On-Demand).

    • DESTINATION -- Amazon GameLift prioritizes based on the list order of destinations in the queue configuration.

    • LOCATION -- Amazon GameLift prioritizes based on the provided order of locations, as defined in LocationOrder.

    " + "documentation":"

    A custom sequence to use when prioritizing where to place new game sessions. Each priority type is listed once.

    • LATENCY -- Amazon GameLift Servers prioritizes locations where the average player latency is lowest. Player latency data is provided in each game session placement request.

    • COST -- Amazon GameLift Servers prioritizes queue destinations with the lowest current hosting costs. Cost is evaluated based on the destination's location, instance type, and fleet type (Spot or On-Demand).

    • DESTINATION -- Amazon GameLift Servers prioritizes based on the list order of destinations in the queue configuration.

    • LOCATION -- Amazon GameLift Servers prioritizes based on the provided order of locations, as defined in LocationOrder.

    " }, "LocationOrder":{ "shape":"LocationList", - "documentation":"

    The prioritization order to use for fleet locations, when the PriorityOrder property includes LOCATION. Locations can include Amazon Web Services Region codes (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations.

    " + "documentation":"

    The prioritization order to use for fleet locations, when the PriorityOrder property includes LOCATION. Locations can include Amazon Web Services Region codes (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift Servers service locations.

    " } }, - "documentation":"

    Custom prioritization settings to use with a game session queue. Prioritization settings determine how the queue selects a game hosting resource to start a new game session. This configuration replaces the default prioritization process for queues.

    By default, a queue makes game session placements based on the following criteria:

    • When a game session request does not include player latency data, Amazon GameLift places game sessions based on the following priorities: (1) the queue's default destination order, and (2) for multi-location fleets, an alphabetic list of locations.

    • When a game session request includes player latency data, Amazon GameLift re-orders the queue's destinations to make placements where the average player latency is lowest. It reorders based the following priorities: (1) the lowest average latency across all players, (2) the lowest hosting cost, (3) the queue's default destination order, and (4) for multi-location fleets, an alphabetic list of locations.

    " + "documentation":"

    Custom prioritization settings to use with a game session queue. Prioritization settings determine how the queue selects a game hosting resource to start a new game session. This configuration replaces the default prioritization process for queues.

    By default, a queue makes game session placements based on the following criteria:

    • When a game session request does not include player latency data, Amazon GameLift Servers places game sessions based on the following priorities: (1) the queue's default destination order, and (2) for multi-location fleets, an alphabetic list of locations.

    • When a game session request includes player latency data, Amazon GameLift Servers re-orders the queue's destinations to make placements where the average player latency is lowest. It reorders based on the following priorities: (1) the lowest average latency across all players, (2) the lowest hosting cost, (3) the queue's default destination order, and (4) for multi-location fleets, an alphabetic list of locations.

    " }, "PriorityConfigurationOverride":{ "type":"structure", @@ -8580,10 +8582,10 @@ }, "LocationOrder":{ "shape":"LocationOrderOverrideList", - "documentation":"

    A prioritized list of hosting locations. The list can include Amazon Web Services Regions (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift service locations.

    " + "documentation":"

    A prioritized list of hosting locations. The list can include Amazon Web Services Regions (such as us-west-2), local zones, and custom locations (for Anywhere fleets). Each location must be listed only once. For details, see Amazon GameLift Servers service locations.

    " } }, - "documentation":"

    An alternate list of prioritized locations for use with a game session queue. When this property is included in a StartGameSessionPlacement request, the alternate list overrides the queue's default location priorities, as defined in the queue's PriorityConfiguration setting (LocationOrder). The override is valid for an individual placement request only. Use this property only with queues that have a PriorityConfiguration setting that prioritizes LOCATION first.

    A priority configuration override list does not override a queue's FilterConfiguration setting, if the queue has one. Filter configurations are used to limit placements to a subset of the locations in a queue's destinations. If the override list includes a location that's not on in the FilterConfiguration allowed list, Amazon GameLift won't attempt to place a game session there.

    " + "documentation":"

    An alternate list of prioritized locations for use with a game session queue. When this property is included in a StartGameSessionPlacement request, the alternate list overrides the queue's default location priorities, as defined in the queue's PriorityConfiguration setting (LocationOrder). The override is valid for an individual placement request only. Use this property only with queues that have a PriorityConfiguration setting that prioritizes LOCATION first.

    A priority configuration override list does not override a queue's FilterConfiguration setting, if the queue has one. Filter configurations are used to limit placements to a subset of the locations in a queue's destinations. If the override list includes a location that's not on the FilterConfiguration allowed list, Amazon GameLift Servers won't attempt to place a game session there.

    " }, "PriorityType":{ "type":"string", @@ -8645,7 +8647,7 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

    Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

    • ActivatingGameSessions -- Game sessions in the process of being created.

    • ActiveGameSessions -- Game sessions that are currently running.

    • ActiveInstances -- Fleet instances that are currently running at least one game session.

    • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

    • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

    • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

    • IdleInstances -- Active instances that are currently hosting zero game sessions.

    • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

    • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

    • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    " + "documentation":"

    Name of the Amazon GameLift Servers-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift Servers with Amazon CloudWatch.

    • ActivatingGameSessions -- Game sessions in the process of being created.

    • ActiveGameSessions -- Game sessions that are currently running.

    • ActiveInstances -- Fleet instances that are currently running at least one game session.

    • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

    • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

    • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

    • IdleInstances -- Active instances that are currently hosting zero game sessions.

    • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

    • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

    • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    " }, "PolicyType":{ "shape":"PolicyType", @@ -8699,15 +8701,15 @@ }, "CertificatePath":{ "shape":"NonZeroAndMaxString", - "documentation":"

    The path to a TLS certificate on your compute resource. Amazon GameLift doesn't validate the path and certificate.

    " + "documentation":"

    The path to a TLS certificate on your compute resource. Amazon GameLift Servers doesn't validate the path and certificate.

    " }, "DnsName":{ "shape":"DnsNameInput", - "documentation":"

    The DNS name of the compute resource. Amazon GameLift requires either a DNS name or IP address.

    " + "documentation":"

    The DNS name of the compute resource. Amazon GameLift Servers requires either a DNS name or IP address.

    " }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

    The IP address of the compute resource. Amazon GameLift requires either a DNS name or IP address. When registering an Anywhere fleet, an IP address is required.

    " + "documentation":"

    The IP address of the compute resource. Amazon GameLift Servers requires either a DNS name or IP address. When registering an Anywhere fleet, an IP address is required.

    " }, "Location":{ "shape":"LocationStringModel", @@ -8814,14 +8816,14 @@ "members":{ "NewGameSessionsPerCreator":{ "shape":"WholeNumber", - "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " + "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift Servers checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " }, "PolicyPeriodInMinutes":{ "shape":"WholeNumber", "documentation":"

    The time span used in evaluating the resource creation limit policy.

    " } }, - "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " + "documentation":"

    A policy that puts limits on the number of game sessions that a player can create within a specified span of time. With this policy, you can control players' ability to consume available resources.

    The policy is evaluated when a player tries to create a new game session. On receiving a CreateGameSession request, Amazon GameLift Servers checks that the player (identified by CreatorId) has created fewer than game session limit in the specified time period.

    " }, "ResumeGameServerGroupInput":{ "type":"structure", @@ -8900,14 +8902,14 @@ "documentation":"

    The maximum amount of time (in seconds) allowed to launch a new game session and have it report ready to host players. During this time, the game session is in status ACTIVATING. If the game session does not become active before the timeout, it is ended and the game session status is changed to TERMINATED.

    " } }, - "documentation":"

    A set of instructions that define the set of server processes to run on computes in a fleet. Server processes run either an executable in a custom game build or a Amazon GameLift Realtime script. Amazon GameLift launches the processes, manages their life cycle, and replaces them as needed. Computes check regularly for an updated runtime configuration.

    An Amazon GameLift instance is limited to 50 processes running concurrently. To calculate the total number of processes defined in a runtime configuration, add the values of the ConcurrentExecutions parameter for each server process. Learn more about Running Multiple Processes on a Fleet.

    " + "documentation":"

    A set of instructions that define the set of server processes to run on computes in a fleet. Server processes run either an executable in a custom game build or a Amazon GameLift Servers Realtime script. Amazon GameLift Servers launches the processes, manages their life cycle, and replaces them as needed. Computes check regularly for an updated runtime configuration.

    An Amazon GameLift Servers instance is limited to 50 processes running concurrently. To calculate the total number of processes defined in a runtime configuration, add the values of the ConcurrentExecutions parameter for each server process. Learn more about Running Multiple Processes on a Fleet.

    " }, "S3Location":{ "type":"structure", "members":{ "Bucket":{ "shape":"NonEmptyString", - "documentation":"

    An Amazon S3 bucket identifier. Thename of the S3 bucket.

    Amazon GameLift doesn't support uploading from Amazon S3 buckets with names that contain a dot (.).

    " + "documentation":"

    An Amazon S3 bucket identifier. The name of the S3 bucket.

    Amazon GameLift Servers doesn't support uploading from Amazon S3 buckets with names that contain a dot (.).

    " }, "Key":{ "shape":"NonEmptyString", @@ -8915,14 +8917,14 @@ }, "RoleArn":{ "shape":"NonEmptyString", - "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access the S3 bucket.

    " + "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift Servers to access the S3 bucket.

    " }, "ObjectVersion":{ "shape":"NonEmptyString", - "documentation":"

    The version of the file, if object versioning is turned on for the bucket. Amazon GameLift uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file. If not set, the latest version of the file is retrieved.

    " + "documentation":"

    The version of the file, if object versioning is turned on for the bucket. Amazon GameLift Servers uses this information when retrieving files from an S3 bucket that you own. Use this parameter to specify a specific version of the file. If not set, the latest version of the file is retrieved.

    " } }, - "documentation":"

    The location in Amazon S3 where build or script files are stored for access by Amazon GameLift.

    " + "documentation":"

    The location in Amazon S3 where build or script files are stored for access by Amazon GameLift Servers.

    " }, "ScalingAdjustmentType":{ "type":"string", @@ -8941,7 +8943,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "Name":{ "shape":"NonZeroAndMaxString", @@ -8973,7 +8975,7 @@ }, "MetricName":{ "shape":"MetricName", - "documentation":"

    Name of the Amazon GameLift-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift with Amazon CloudWatch.

    • ActivatingGameSessions -- Game sessions in the process of being created.

    • ActiveGameSessions -- Game sessions that are currently running.

    • ActiveInstances -- Fleet instances that are currently running at least one game session.

    • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

    • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

    • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

    • IdleInstances -- Active instances that are currently hosting zero game sessions.

    • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

    • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

    • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    " + "documentation":"

    Name of the Amazon GameLift Servers-defined metric that is used to trigger a scaling adjustment. For detailed descriptions of fleet metrics, see Monitor Amazon GameLift Servers with Amazon CloudWatch.

    • ActivatingGameSessions -- Game sessions in the process of being created.

    • ActiveGameSessions -- Game sessions that are currently running.

    • ActiveInstances -- Fleet instances that are currently running at least one game session.

    • AvailableGameSessions -- Additional game sessions that fleet could host simultaneously, given current capacity.

    • AvailablePlayerSessions -- Empty player slots in currently active game sessions. This includes game sessions that are not currently accepting players. Reserved player slots are not included.

    • CurrentPlayerSessions -- Player slots in active game sessions that are being used by a player or are reserved for a player.

    • IdleInstances -- Active instances that are currently hosting zero game sessions.

    • PercentAvailableGameSessions -- Unused percentage of the total number of game sessions that a fleet could host simultaneously, given current capacity. Use this metric for a target-based scaling policy.

    • PercentIdleInstances -- Percentage of the total number of active instances that are hosting zero game sessions.

    • QueueDepth -- Pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    • WaitTime -- Current wait time for pending game session placement requests, in any queue, where the current fleet is the top-priority destination.

    " }, "PolicyType":{ "shape":"PolicyType", @@ -9019,7 +9021,7 @@ }, "ScriptArn":{ "shape":"ScriptArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the ScriptId value.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers script resource and uniquely identifies it. ARNs are unique across all Regions. In a GameLift script ARN, the resource ID matches the ScriptId value.

    " }, "Name":{ "shape":"NonZeroAndMaxString", @@ -9039,7 +9041,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " + "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift Servers to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift Servers uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " } }, "documentation":"

    Properties describing a Realtime script.

    Related actions

    All APIs by task

    " @@ -9115,7 +9117,7 @@ "members":{ "LaunchPath":{ "shape":"LaunchPathStringModel", - "documentation":"

    The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    Amazon GameLift doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations initSDK() and ProcessReady().

    " + "documentation":"

    The location of a game build executable or Realtime script. Game builds and Realtime scripts are installed on instances at the root:

    • Windows (custom game builds only): C:\\game. Example: \"C:\\game\\MyGame\\server.exe\"

    • Linux: /local/game. Examples: \"/local/game/MyGame/server.exe\" or \"/local/game/MyRealtimeScript.js\"

    Amazon GameLift Servers doesn't support the use of setup scripts that launch the game executable. For custom game builds, this parameter must indicate the executable that calls the server SDK operations initSDK() and ProcessReady().

    " }, "Parameters":{ "shape":"LaunchParametersStringModel", @@ -9126,7 +9128,7 @@ "documentation":"

    The number of server processes using this configuration that run concurrently on each instance or compute.

    " } }, - "documentation":"

    A set of instructions for launching server processes on fleet computes. Server processes run either an executable in a custom game build or a Amazon GameLift Realtime script. Server process configurations are part of a fleet's runtime configuration.

    " + "documentation":"

    A set of instructions for launching server processes on fleet computes. Server processes run either an executable in a custom game build or a Amazon GameLift Servers Realtime script. Server process configurations are part of a fleet's runtime configuration.

    " }, "ServerProcessList":{ "type":"list", @@ -9192,7 +9194,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " } } }, @@ -9238,7 +9240,7 @@ }, "PriorityConfigurationOverride":{ "shape":"PriorityConfigurationOverride", - "documentation":"

    A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). You can choose to limit placements to locations on the override list only, or you can prioritize locations on the override list first and then fall back to the queue's other locations if needed. Choose a fallback strategy to use in the event that Amazon GameLift fails to place a game session in any of the locations on the priority override list.

    " + "documentation":"

    A prioritized list of locations to use for the game session placement and instructions on how to use it. This list overrides a queue's prioritized location list for this game session placement request only. You can include Amazon Web Services Regions, local zones, and custom locations (for Anywhere fleets). You can choose to limit placements to locations on the override list only, or you can prioritize locations on the override list first and then fall back to the queue's other locations if needed. Choose a fallback strategy to use in the event that Amazon GameLift Servers fails to place a game session in any of the locations on the priority override list.

    " } } }, @@ -9260,7 +9262,7 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

    A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the match backfill ticket status and retrieve match results.

    " + "documentation":"

    A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift Servers will generate one in the form of a UUID. Use this identifier to track the match backfill ticket status and retrieve match results.

    " }, "ConfigurationName":{ "shape":"MatchmakingConfigurationName", @@ -9294,7 +9296,7 @@ "members":{ "TicketId":{ "shape":"MatchmakingIdStringModel", - "documentation":"

    A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.

    " + "documentation":"

    A unique identifier for a matchmaking ticket. If no ticket ID is specified here, Amazon GameLift Servers will generate one in the form of a UUID. Use this identifier to track the matchmaking ticket status and retrieve match results.

    " }, "ConfigurationName":{ "shape":"MatchmakingConfigurationName", @@ -9345,7 +9347,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " } } }, @@ -9380,8 +9382,7 @@ }, "StopMatchmakingOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "StringList":{ "type":"list", @@ -9417,15 +9418,15 @@ }, "ImageUri":{ "shape":"ImageUriString", - "documentation":"

    The URI to the image that Amazon GameLift deploys to a container fleet. For a more specific identifier, see ResolvedImageDigest.

    " + "documentation":"

    The URI to the image that Amazon GameLift Servers deploys to a container fleet. For a more specific identifier, see ResolvedImageDigest.

    " }, "MemoryHardLimitMebibytes":{ "shape":"ContainerMemoryLimit", - "documentation":"

    The amount of memory that Amazon GameLift makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.

    Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes

    " + "documentation":"

    The amount of memory that Amazon GameLift Servers makes available to the container. If memory limits aren't set for an individual container, the container shares the container group's total memory allocation.

    Related data type: ContainerGroupDefinition TotalMemoryLimitMebibytes

    " }, "PortConfiguration":{ "shape":"ContainerPortConfiguration", - "documentation":"

    A set of ports that allow access to the container from external users. Processes running in the container can bind to a one of these ports. Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's ConnectionPortRange.

    " + "documentation":"

    A set of ports that allow access to the container from external users. Processes running in the container can bind to a one of these ports. Container ports aren't directly accessed by inbound traffic. Amazon GameLift Servers maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's ConnectionPortRange.

    " }, "ResolvedImageDigest":{ "shape":"Sha256", @@ -9471,7 +9472,7 @@ }, "ImageUri":{ "shape":"ImageUriString", - "documentation":"

    The location of the container image to deploy to a container fleet. Provide an image in an Amazon Elastic Container Registry public or private repository. The repository must be in the same Amazon Web Services account and Amazon Web Services Region where you're creating the container group definition. For limits on image size, see Amazon GameLift endpoints and quotas. You can use any of the following image URI formats:

    • Image ID only: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]

    • Image ID and digest: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]@[digest]

    • Image ID and tag: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]:[tag]

    " + "documentation":"

    The location of the container image to deploy to a container fleet. Provide an image in an Amazon Elastic Container Registry public or private repository. The repository must be in the same Amazon Web Services account and Amazon Web Services Region where you're creating the container group definition. For limits on image size, see Amazon GameLift Servers endpoints and quotas. You can use any of the following image URI formats:

    • Image ID only: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]

    • Image ID and digest: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]@[digest]

    • Image ID and tag: [AWS account].dkr.ecr.[AWS region].amazonaws.com/[repository ID]:[tag]

    " }, "MemoryHardLimitMebibytes":{ "shape":"ContainerMemoryLimit", @@ -9479,7 +9480,7 @@ }, "PortConfiguration":{ "shape":"ContainerPortConfiguration", - "documentation":"

    A set of ports that Amazon GameLift can assign to processes in the container. Any processes that accept inbound traffic connections must be assigned a port from this set. The container port range must be large enough to assign one to each process in the container that needs one.

    Container ports aren't directly accessed by inbound traffic. Amazon GameLift maps these container ports to externally accessible connection ports, which are assigned as needed from the container fleet's ConnectionPortRange.

    " + "documentation":"

    A set of ports that Amazon GameLift Servers can assign to processes in a container. The container port configuration must have enough ports for each container process that accepts inbound traffic connections. A container port configuration can have one or more container port ranges. Each range specifies starting and ending values as well as the supported network protocol.

    Container ports aren't directly accessed by inbound traffic. Amazon GameLift Servers maps each container port to an externally accessible connection port (see the container fleet property ConnectionPortRange).

    " }, "Vcpu":{ "shape":"ContainerVcpu", @@ -9542,7 +9543,7 @@ "documentation":"

    The value for a developer-defined key value pair for tagging an Amazon Web Services resource.

    " } }, - "documentation":"

    A label that you can assign to a Amazon GameLift resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " + "documentation":"

    A label that you can assign to a Amazon GameLift Servers resource.

    Learn more

    Tagging Amazon Web Services Resources in the Amazon Web Services General Reference

    Amazon Web Services Tagging Strategies

    Related actions

    All APIs by task

    " }, "TagKey":{ "type":"string", @@ -9570,18 +9571,17 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift resource that you want to assign tags to. Amazon GameLift includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift Servers resource that you want to assign tags to. Amazon GameLift Servers includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " }, "Tags":{ "shape":"TagList", - "documentation":"

    A list of one or more tags to assign to the specified Amazon GameLift resource. Tags are developer-defined and structured as key-value pairs. The maximum tag limit may be lower than stated. See Tagging Amazon Web Services Resources for tagging limits.

    " + "documentation":"

    A list of one or more tags to assign to the specified Amazon GameLift Servers resource. Tags are developer-defined and structured as key-value pairs. The maximum tag limit may be lower than stated. See Tagging Amazon Web Services Resources for tagging limits.

    " } } }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -9605,7 +9605,7 @@ "documentation":"

    Desired value to use with a target-based scaling policy. The value must be relevant for whatever metric the scaling policy is using. For example, in a policy using the metric PercentAvailableGameSessions, the target value should be the preferred size of the fleet's buffer (the percent of capacity that should be idle and ready for new game sessions).

    " } }, - "documentation":"

    Settings for a target-based scaling policy. A target-based policy tracks a particular fleet metric specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target based policy, including the target value.

    " + "documentation":"

    Settings for a target-based scaling policy. A target-based policy tracks a particular fleet metric and specifies a target value for the metric. As player usage changes, the policy triggers Amazon GameLift Servers to adjust capacity so that the metric returns to the target value. The target configuration specifies settings as needed for the target based policy, including the target value.

    " }, "TargetTrackingConfiguration":{ "type":"structure", @@ -9616,7 +9616,7 @@ "documentation":"

    Desired value to use with a game server group target-based scaling policy.

    " } }, - "documentation":"

    This data type is used with the Amazon GameLift FleetIQ and game server groups.

    Settings for a target-based scaling policy as part of a GameServerGroupAutoScalingPolicy . These settings are used to create a target-based policy that tracks the Amazon GameLift FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

    " + "documentation":"

    This data type is used with the Amazon GameLift Servers FleetIQ and game server groups.

    Settings for a target-based scaling policy as part of a GameServerGroupAutoScalingPolicy . These settings are used to create a target-based policy that tracks the Amazon GameLift Servers FleetIQ metric \"PercentUtilizedGameServers\" and specifies a target value for the metric. As player usage changes, the policy triggers to adjust the game server group capacity so that the metric returns to the target value.

    " }, "TerminalRoutingStrategyException":{ "type":"structure", @@ -9639,7 +9639,7 @@ }, "TerminationMode":{ "shape":"TerminationMode", - "documentation":"

    The method to use to terminate the game session. Available methods include:

    • TRIGGER_ON_PROCESS_TERMINATE – Prompts the Amazon GameLift service to send an OnProcessTerminate() callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding(), which is how the server process signals to Amazon GameLift that a game session is ending. If the server process doesn't call ProcessEnding(), the game session termination won't conclude successfully.

    • FORCE_TERMINATE – Prompts the Amazon GameLift service to stop the server process immediately. Amazon GameLift takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence.

      This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Agent. In this scenario, a force terminate request results in an invalid or bad request exception.

    " + "documentation":"

    The method to use to terminate the game session. Available methods include:

    • TRIGGER_ON_PROCESS_TERMINATE – Prompts the Amazon GameLift Servers service to send an OnProcessTerminate() callback to the server process and initiate the normal game session shutdown sequence. The OnProcessTerminate method, which is implemented in the game server code, must include a call to the server SDK action ProcessEnding(), which is how the server process signals to Amazon GameLift Servers that a game session is ending. If the server process doesn't call ProcessEnding(), the game session termination won't conclude successfully.

    • FORCE_TERMINATE – Prompts the Amazon GameLift Servers service to stop the server process immediately. Amazon GameLift Servers takes action (depending on the type of fleet) to shut down the server process without the normal game session shutdown sequence.

      This method is not available for game sessions that are running on Anywhere fleets unless the fleet is deployed with the Amazon GameLift Servers Agent. In this scenario, a force terminate request results in an invalid or bad request exception.

    " } } }, @@ -9657,6 +9657,20 @@ ] }, "Timestamp":{"type":"timestamp"}, + "UDPEndpoint":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"NonZeroAndMaxString", + "documentation":"

    The domain name of the UDP endpoint.

    " + }, + "Port":{ + "shape":"PositiveInteger", + "documentation":"

    The port number of the UDP endpoint.

    " + } + }, + "documentation":"

    The domain name and port information for a UDP endpoint.

    " + }, "UnauthorizedException":{ "type":"structure", "members":{ @@ -9682,18 +9696,17 @@ "members":{ "ResourceARN":{ "shape":"AmazonResourceName", - "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift resource that you want to remove tags from. Amazon GameLift includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " + "documentation":"

    The Amazon Resource Name (ARN) that uniquely identifies the Amazon GameLift Servers resource that you want to remove tags from. Amazon GameLift Servers includes resource ARNs in the data object for the resource. You can retrieve the ARN by calling a List or Describe operation for the resource type.

    " }, "TagKeys":{ "shape":"TagKeyList", - "documentation":"

    A list of one or more tag keys to remove from the specified Amazon GameLift resource.

    " + "documentation":"

    A list of one or more tag keys to remove from the specified Amazon GameLift Servers resource.

    " } } }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAliasInput":{ "type":"structure", @@ -9771,11 +9784,11 @@ }, "GameServerContainerGroupsPerInstance":{ "shape":"GameServerContainerGroupsPerInstance", - "documentation":"

    The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. You can remove this property value to use the calculated value, or set it manually. If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum.

    " + "documentation":"

    The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift Servers calculates the maximum number of game server container groups that can fit on each instance. You can remove this property value to use the calculated value, or set it manually. If you set this number manually, Amazon GameLift Servers uses your value as long as it's less than the calculated maximum.

    " }, "InstanceConnectionPortRange":{ "shape":"ConnectionPortRange", - "documentation":"

    A revised set of port numbers to open on each fleet instance. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. If you previously set this parameter manually, you can't reset this to use the calculated settings.

    " + "documentation":"

    A revised set of port numbers to open on each fleet instance. By default, Amazon GameLift Servers calculates an optimal port range based on your fleet configuration. If you previously set this parameter manually, you can't reset this to use the calculated settings.

    " }, "InstanceInboundPermissionAuthorizations":{ "shape":"IpPermissionsList", @@ -9858,7 +9871,7 @@ }, "OperatingSystem":{ "shape":"ContainerOperatingSystem", - "documentation":"

    The platform that all containers in the group use. Containers in a group must run on the same operating system.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " + "documentation":"

    The platform that all containers in the group use. Containers in a group must run on the same operating system.

    Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use server SDK version 4.x for Amazon GameLift Servers, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to server SDK version 5.

    " } } }, @@ -9901,7 +9914,7 @@ }, "AnywhereConfiguration":{ "shape":"AnywhereConfiguration", - "documentation":"

    Amazon GameLift Anywhere configuration options.

    " + "documentation":"

    Amazon GameLift Servers Anywhere configuration options.

    " } } }, @@ -9914,7 +9927,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " } } }, @@ -9953,7 +9966,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " }, "Location":{ "shape":"LocationStringModel", @@ -9988,7 +10001,7 @@ }, "FleetArn":{ "shape":"FleetArn", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers fleet resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::fleet/fleet-a1234567-b8c9-0d1e-2fa3-b45c6d7e8912.

    " } } }, @@ -10002,11 +10015,11 @@ }, "RoleArn":{ "shape":"IamRoleArn", - "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift to access your Amazon EC2 Auto Scaling groups.

    " + "documentation":"

    The Amazon Resource Name (ARN) for an IAM role that allows Amazon GameLift Servers to access your Amazon EC2 Auto Scaling groups.

    " }, "InstanceDefinitions":{ "shape":"InstanceDefinitions", - "documentation":"

    An updated list of Amazon EC2 instance types to use in the Auto Scaling group. The instance definitions must specify at least two different instance types that are supported by Amazon GameLift FleetIQ. This updated list replaces the entire current list of instance definitions for the game server group. For more information on instance types, see EC2 Instance Types in the Amazon EC2 User Guide. You can optionally specify capacity weighting for each instance type. If no weight value is specified for an instance type, it is set to the default value \"1\". For more information about capacity weighting, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

    " + "documentation":"

    An updated list of Amazon EC2 instance types to use in the Auto Scaling group. The instance definitions must specify at least two different instance types that are supported by Amazon GameLift Servers FleetIQ. This updated list replaces the entire current list of instance definitions for the game server group. For more information on instance types, see EC2 Instance Types in the Amazon EC2 User Guide. You can optionally specify capacity weighting for each instance type. If no weight value is specified for an instance type, it is set to the default value \"1\". For more information about capacity weighting, see Instance Weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

    " }, "GameServerProtectionPolicy":{ "shape":"GameServerProtectionPolicy", @@ -10014,7 +10027,7 @@ }, "BalancingStrategy":{ "shape":"BalancingStrategy", - "documentation":"

    Indicates how Amazon GameLift FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " + "documentation":"

    Indicates how Amazon GameLift Servers FleetIQ balances the use of Spot Instances and On-Demand Instances in the game server group. Method options include the following:

    • SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot Instances are unavailable or not viable for game hosting, the game server group provides no hosting capacity until Spot Instances can again be used. Until then, no new instances are started, and the existing nonviable Spot Instances are terminated (after current gameplay ends) and are not replaced.

    • SPOT_PREFERRED - (default value) Spot Instances are used whenever available in the game server group. If Spot Instances are unavailable, the game server group continues to provide hosting capacity by falling back to On-Demand Instances. Existing nonviable Spot Instances are terminated (after current gameplay ends) and are replaced with new On-Demand Instances.

    • ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server group. No Spot Instances are used, even when available, while this balancing strategy is in force.

    " } } }, @@ -10118,7 +10131,7 @@ }, "PlayerLatencyPolicies":{ "shape":"PlayerLatencyPolicyList", - "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value. When updating policies, provide a complete collection of policies.

    " + "documentation":"

    A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift Servers can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value. When updating policies, provide a complete collection of policies.

    " }, "Destinations":{ "shape":"GameSessionQueueDestinationList", @@ -10165,7 +10178,7 @@ }, "GameSessionQueueArns":{ "shape":"QueueArnsList", - "documentation":"

    The Amazon Resource Name (ARN) that is assigned to a Amazon GameLift game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

    " + "documentation":"

    The Amazon Resource Name (ARN) that is assigned to an Amazon GameLift Servers game session queue resource and uniquely identifies it. ARNs are unique across all Regions. Format is arn:aws:gamelift:<region>::gamesessionqueue/<queue name>. Queues can be located in any Region. Queues are used to start new Amazon GameLift Servers-hosted game sessions for matches that are created with this matchmaking configuration. If FlexMatchMode is set to STANDALONE, do not set this parameter.

    " }, "RequestTimeoutSeconds":{ "shape":"MatchmakingRequestTimeoutInteger", @@ -10209,7 +10222,7 @@ }, "FlexMatchMode":{ "shape":"FlexMatchMode", - "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift queue to start a game session for the match.

    " + "documentation":"

    Indicates whether this matchmaking configuration is being used with Amazon GameLift Servers hosting or as a standalone matchmaking solution.

    • STANDALONE - FlexMatch forms matches and returns match information, including players and team assignments, in a MatchmakingSucceeded event.

    • WITH_QUEUE - FlexMatch forms matches and uses the specified Amazon GameLift Servers queue to start a game session for the match.

    " } } }, @@ -10235,7 +10248,7 @@ }, "RuntimeConfiguration":{ "shape":"RuntimeConfiguration", - "documentation":"

    Instructions for launching server processes on fleet computes. Server processes run either a custom game build executable or a Amazon GameLift Realtime script. The runtime configuration lists the types of server processes to run, how to launch them, and the number of processes to run concurrently.

    " + "documentation":"

    Instructions for launching server processes on fleet computes. Server processes run either a custom game build executable or an Amazon GameLift Servers Realtime script. The runtime configuration lists the types of server processes to run, how to launch them, and the number of processes to run concurrently.

    " } } }, @@ -10266,7 +10279,7 @@ }, "StorageLocation":{ "shape":"S3Location", - "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " + "documentation":"

    The location of the Amazon S3 bucket where a zipped file containing your Realtime scripts is stored. The storage location must specify the Amazon S3 bucket name, the zip file name (the \"key\"), and a role ARN that allows Amazon GameLift Servers to access the Amazon S3 storage location. The S3 bucket must be in the same Region where you want to create a new script. By default, Amazon GameLift Servers uploads the latest version of the zip file; if you have S3 object versioning turned on, you can use the ObjectVersion parameter to specify an earlier version.

    " }, "ZipFile":{ "shape":"ZipBlob", @@ -10279,7 +10292,7 @@ "members":{ "Script":{ "shape":"Script", - "documentation":"

    The newly created script record with a unique script ID. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controls by the Amazon GameLift service.

    " + "documentation":"

    The newly created script record with a unique script ID. The new script's storage location reflects an Amazon S3 location: (1) If the script was uploaded from an S3 bucket under your account, the storage location reflects the information that was provided in the CreateScript request; (2) If the script file was uploaded from a local zip file, the storage location reflects an S3 location controlled by the Amazon GameLift Servers service.

    " } } }, @@ -10307,7 +10320,7 @@ "members":{ "GameLiftAwsAccountId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " + "documentation":"

    A unique identifier for the Amazon Web Services account that you use to manage your Amazon GameLift Servers fleet. You can find your Account ID in the Amazon Web Services Management Console under account settings.

    " }, "PeerVpcAwsAccountId":{ "shape":"NonZeroAndMaxString", @@ -10315,7 +10328,7 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " }, "CreationTime":{ "shape":"Timestamp", @@ -10326,7 +10339,7 @@ "documentation":"

    Time stamp indicating when this authorization expires (24 hours after issuance). Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").

    " } }, - "documentation":"

    Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

    Related actions

    All APIs by task

    " + "documentation":"

    Represents an authorization for a VPC peering connection between the VPC for an Amazon GameLift Servers fleet and another VPC on an account you have access to. This authorization must exist and be valid for the peering connection to be established. Authorizations are valid for 24 hours after they are issued.

    Related actions

    All APIs by task

    " }, "VpcPeeringAuthorizationList":{ "type":"list", @@ -10337,7 +10350,7 @@ "members":{ "FleetId":{ "shape":"FleetId", - "documentation":"

    A unique identifier for the fleet. This ID determines the ID of the Amazon GameLift VPC for your fleet.

    " + "documentation":"

    A unique identifier for the fleet. This ID determines the ID of the Amazon GameLift Servers VPC for your fleet.

    " }, "FleetArn":{ "shape":"FleetArn", @@ -10357,14 +10370,14 @@ }, "PeerVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Fleets.

    " + "documentation":"

    A unique identifier for a VPC with resources to be accessed by your Amazon GameLift Servers fleet. The VPC must be in the same Region as your fleet. To look up a VPC ID, use the VPC Dashboard in the Amazon Web Services Management Console. Learn more about VPC peering in VPC Peering with Amazon GameLift Servers Fleets.

    " }, "GameLiftVpcId":{ "shape":"NonZeroAndMaxString", - "documentation":"

    A unique identifier for the VPC that contains the Amazon GameLift fleet for this connection. This VPC is managed by Amazon GameLift and does not appear in your Amazon Web Services account.

    " + "documentation":"

    A unique identifier for the VPC that contains the Amazon GameLift Servers fleet for this connection. This VPC is managed by Amazon GameLift Servers and does not appear in your Amazon Web Services account.

    " } }, - "documentation":"

    Represents a peering connection between a VPC on one of your Amazon Web Services accounts and the VPC for your Amazon GameLift fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

    Related actions

    All APIs by task

    " + "documentation":"

    Represents a peering connection between a VPC on one of your Amazon Web Services accounts and the VPC for your Amazon GameLift Servers fleets. This record may be for an active peering connection or a pending connection that has not yet been established.

    Related actions

    All APIs by task

    " }, "VpcPeeringConnectionList":{ "type":"list", @@ -10411,5 +10424,5 @@ "max":5000000 } }, - "documentation":"

    Amazon GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.

    About Amazon GameLift solutions

    Get more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide.

    • Amazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking.

    • Managed hosting with Amazon GameLift Realtime -- With Amazon GameLift Amazon GameLift Realtime, you can quickly configure and set up ready-to-go game servers for your game. Amazon GameLift Realtime provides a game server framework with core Amazon GameLift infrastructure already built in. Then use the full range of Amazon GameLift managed hosting features, including FlexMatch, for your game.

    • Amazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services.

    • Amazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. Use FlexMatch as integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution.

    About this API Reference

    This reference guide describes the low-level service API for Amazon GameLift. With each topic in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI reference. Useful links:

    " + "documentation":"

    Amazon GameLift Servers provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand.

    About Amazon GameLift Servers solutions

    Get more information on these Amazon GameLift Servers solutions in the Amazon GameLift Servers Developer Guide.

    • Amazon GameLift Servers managed hosting -- Amazon GameLift Servers offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking.

    • Managed hosting with Amazon GameLift Servers Realtime -- With Amazon GameLift Servers Realtime, you can quickly configure and set up ready-to-go game servers for your game. Amazon GameLift Servers Realtime provides a game server framework with core Amazon GameLift Servers infrastructure already built in. Then use the full range of Amazon GameLift Servers managed hosting features, including FlexMatch, for your game.

    • Amazon GameLift Servers FleetIQ -- Use Amazon GameLift Servers FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. Amazon GameLift Servers FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the Amazon GameLift Servers FleetIQ and FlexMatch standalone services.

    • Amazon GameLift Servers FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. Use FlexMatch as integrated with Amazon GameLift Servers managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution.

    About this API Reference

    This reference guide describes the low-level service API for Amazon GameLift Servers. With each topic in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI reference. Useful links:

    " } diff --git a/services/gameliftstreams/pom.xml b/services/gameliftstreams/pom.xml index 6565fac34698..efbc3abdba9b 100644 --- a/services/gameliftstreams/pom.xml +++ b/services/gameliftstreams/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT gameliftstreams AWS Java SDK :: Services :: Game Lift Streams diff --git a/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json b/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json index 88e5938700dc..3c3bd7cdc87d 100644 --- a/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json +++ b/services/gameliftstreams/src/main/resources/codegen-resources/service-2.json @@ -70,7 +70,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other software, and configures the settings to run it.

    Before you create an application, upload your application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

    Make sure that your files in the Amazon S3 bucket are the correct version you want to use. As soon as you create a Amazon GameLift Streams application, you cannot change the files at a later time.

    If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an application reaches READY status, you can use the application to set up stream groups and start streams. To track application status, call GetApplication.

    " + "documentation":"

    Creates an application resource in Amazon GameLift Streams, which specifies the application content you want to stream, such as a game build or other software, and configures the settings to run it.

    Before you create an application, upload your application content files to an Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

    Make sure that your files in the Amazon S3 bucket are the correct version you want to use. If you change the files at a later time, you will need to create a new Amazon GameLift Streams application.

    If the request is successful, Amazon GameLift Streams begins to create an application and sets the status to INITIALIZED. When an application reaches READY status, you can use the application to set up stream groups and start streams. To track application status, call GetApplication.

    " }, "CreateStreamGroup":{ "name":"CreateStreamGroup", @@ -90,7 +90,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

    Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to support at one time, and in what locations.

    Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:

    • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

       </p> </li> <li> <p> <b>On-demand</b>: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes). </p> </li> </ul> <p> To adjust the capacity of any <code>ACTIVE</code> stream group, call <a href="https://docs.aws.amazon.com/gameliftstreams/latest/apireference/API_UpdateStreamGroup.html">UpdateStreamGroup</a>. </p> <p> If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to <code>ACTIVATING</code>. When the stream group reaches <code>ACTIVE</code> status, you can start stream sessions by using <a href="https://docs.aws.amazon.com/gameliftstreams/latest/apireference/API_StartStreamSession.html">StartStreamSession</a>. To check the stream group's status, call <a href="https://docs.aws.amazon.com/gameliftstreams/latest/apireference/API_GetStreamGroup.html">GetStreamGroup</a>. </p> 
      ", + "documentation":"

      Manage how Amazon GameLift Streams streams your applications by using a stream group. A stream group is a collection of resources that Amazon GameLift Streams uses to stream your application to end-users. When you create a stream group, you specify an application to stream by default and the type of hardware to use, such as the graphical processing unit (GPU). You can also link additional applications, which allows you to stream those applications using this stream group. Depending on your expected users, you also scale the number of concurrent streams you want to support at one time, and in what locations.

      Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

      • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

      • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

      To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.

      If the request is successful, Amazon GameLift Streams begins creating the stream group. Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. When the stream group reaches ACTIVE status, you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.

      ", "idempotent":true }, "CreateStreamSessionConnection":{ @@ -363,7 +363,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"} ], - "documentation":"

      This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute resource, or stream capacity, that a stream group has allocated.

      To start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either when creating the stream group, or by using AssociateApplications.

      For stream groups that have multiple locations, provide a set of locations ordered by priority by setting Locations. Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote location before the remote location can host a stream.

      If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and searches for available stream capacity to run the stream. This can result to one of the following:

      • Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready, the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection information to the requesting client to join the stream session.

      • Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams stops processing the request, and the stream session object status changes to ERROR with status reason placementTimeout.

      " + "documentation":"

      This action initiates a new stream session and outputs connection information that clients can use to access the stream. A stream session refers to an instance of a stream that Amazon GameLift Streams transmits from the server to the end-user. A stream session runs on a compute resource that a stream group has allocated.

      To start a new stream session, specify a stream group and application ID, along with the transport protocol and signal request settings to use with the stream. You must have associated at least one application to the stream group before starting a stream session, either when creating the stream group, or by using AssociateApplications.

      For stream groups that have multiple locations, provide a set of locations ordered by priority using a Locations parameter. Amazon GameLift Streams will start a single stream session in the next available location. An application must be finished replicating in a remote location before the remote location can host a stream.

      If the request is successful, Amazon GameLift Streams begins to prepare the stream. Amazon GameLift Streams assigns an Amazon Resource Name (ARN) value to the stream session resource and sets the status to ACTIVATING. During the stream preparation process, Amazon GameLift Streams queues the request and searches for available stream capacity to run the stream. This results in one of the following:

      • Amazon GameLift Streams identifies an available compute resource to run the application content and start the stream. When the stream is ready, the stream session's status changes to ACTIVE and includes stream connection information. Provide the connection information to the requesting client to join the stream session.

      • Amazon GameLift Streams doesn't identify an available resource within a certain time, set by ClientToken. In this case, Amazon GameLift Streams stops processing the request, and the stream session object status changes to ERROR with status reason placementTimeout.

      " }, "TagResource":{ "name":"TagResource", @@ -455,7 +455,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

      Updates the configuration settings for an Amazon GameLift Streams stream group resource. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

      Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity: always-on and on-demand:

      • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

         </p> </li> <li> <p> <b>On-demand</b>: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes). </p> </li> </ul> <p>To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group.</p> 
        " + "documentation":"

        Updates the configuration settings for an Amazon GameLift Streams stream group resource. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

        Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

        • Always-on: The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        • On-demand: The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group.

        " } }, "shapes":{ @@ -484,7 +484,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to add the specified locations to.

        This value is a Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to add the specified locations to.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, @@ -503,7 +503,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        This value is the Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        " + "documentation":"

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        " }, "Locations":{ "shape":"LocationStates", @@ -562,7 +562,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An ID that uniquely identifies the application resource. Example ID: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -602,11 +602,11 @@ "members":{ "ApplicationIdentifiers":{ "shape":"Identifiers", - "documentation":"

        A set of applications to associate with the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications to associate with the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to associate to the applications.

        This value is a Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to associate to the applications.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -617,11 +617,11 @@ "members":{ "ApplicationArns":{ "shape":"ArnList", - "documentation":"

        A set of applications that are associated to the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that are associated to the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        A stream group that is associated to the applications.

        This value is a Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        A stream group that is associated to the applications.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " } } }, @@ -669,7 +669,7 @@ "members":{ "ApplicationLogOutputUri":{ "shape":"ApplicationLogOutputUri", - "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Application log bucket permission policy in the Amazon GameLift Streams Developer Guide.

        " }, "ApplicationLogPaths":{ "shape":"FilePaths", @@ -720,7 +720,7 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", @@ -740,7 +740,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -778,7 +778,7 @@ }, "DefaultApplicationIdentifier":{ "shape":"Identifier", - "documentation":"

        The unique identifier of the Amazon GameLift Streams application that you want to associate to a stream group as the default application. The application must be in READY status. By setting the default application identifier, you will optimize startup performance of this application in your stream group. Once set, this application cannot be disassociated from the stream group, unlike applications that are associated using AssociateApplications. If not set when creating a stream group, you will need to call AssociateApplications later, before you can start streaming.

        " + "documentation":"

        The unique identifier of the Amazon GameLift Streams application that you want to associate to a stream group as the default application. The application must be in READY status. By setting the default application identifier, you will optimize startup performance of this application in your stream group. Once set, this application cannot be disassociated from the stream group, unlike applications that are associated using AssociateApplications. If not set when creating a stream group, you will need to call AssociateApplications later, before you can start streaming.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Description":{ "shape":"Description", @@ -804,11 +804,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -863,7 +863,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle stream capacity.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        The stream group that you want to run this stream session with. The stream group must be in ACTIVE status and have idle stream capacity.

        ", "location":"uri", "locationName":"Identifier" }, @@ -873,7 +873,7 @@ }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4. The stream session must be in PENDING_CLIENT_RECONNECTION or ACTIVE status.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        The stream session must be in PENDING_CLIENT_RECONNECTION or ACTIVE status.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -893,11 +893,11 @@ "members":{ "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Id":{ "shape":"Id", - "documentation":"

        An ID that uniquely identifies the application resource. For example: a-9ZY8X7Wv6.

        " + "documentation":"

        An ID that uniquely identifies the application resource. Example ID: a-9ZY8X7Wv6.

        " } }, "documentation":"

        Represents the default Amazon GameLift Streams application that a stream group hosts.

        " @@ -908,7 +908,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -920,7 +920,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique ID value of the stream group resource to delete. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -941,11 +941,11 @@ "members":{ "ApplicationIdentifiers":{ "shape":"Identifiers", - "documentation":"

        A set of applications that you want to disassociate from the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that you want to disassociate from the stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to disassociate these applications from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to disassociate these applications from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -956,11 +956,11 @@ "members":{ "ApplicationArns":{ "shape":"ArnList", - "documentation":"

        A set of applications that are disassociated from this stream group.

        This value is a set of either Amazon Resource Names (ARN) or IDs that uniquely identify application resources. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that are disassociated from this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " } } }, @@ -992,7 +992,7 @@ "members":{ "OutputUri":{ "shape":"OutputUri", - "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " + "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploaded the set of compressed exported files for a stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " }, "Status":{ "shape":"ExportFilesStatus", @@ -1028,17 +1028,17 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "OutputUri":{ "shape":"OutputUri", - "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://MyBucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://MyBucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://MyBucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " + "documentation":"

        The S3 bucket URI where Amazon GameLift Streams uploads the set of compressed exported files for this stream session. Amazon GameLift Streams generates a ZIP file name based on the stream session metadata. Alternatively, you can provide a custom file name with a .zip file extension.

        Example 1: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGame_Session1.zip, then Amazon GameLift Streams will save the files at that location.

        Example 2: If you provide an S3 URI called s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/, then Amazon GameLift Streams will save the files at s3://amzn-s3-demo-destination-bucket/MyGameSessions_ExportedFiles/YYYYMMDD-HHMMSS-appId-sg-Id-sessionId.zip or another similar name.

        " }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -1073,7 +1073,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -1097,11 +1097,11 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", - "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.

        " + "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1117,7 +1117,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1147,7 +1147,7 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique ID value of the stream group resource to retrieve. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" } @@ -1159,11 +1159,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated to. You can stream any of these applications by using this stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1212,13 +1212,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        The stream group that runs this stream session.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        The stream group that runs this stream session.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -1237,11 +1237,11 @@ }, "ApplicationArn":{ "shape":"Arn", - "documentation":"

        The application streaming in this session.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        The application streaming in this session.

        This value is an Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to a stream session resource. When combined with the stream group resource ID, this value uniquely identifies the stream session across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[stream group resource ID]/[stream session resource ID].

        " }, "ConnectionTimeoutSeconds":{ "shape":"ConnectionTimeoutSeconds", @@ -1265,7 +1265,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "LogFileLocationUri":{ "shape":"FileLocationUri", @@ -1297,7 +1297,7 @@ }, "StreamGroupId":{ "shape":"Id", - "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.

        " + "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session. Format example: sg-1AB2C3De4.

        " }, "UserId":{ "shape":"UserId", @@ -1456,7 +1456,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        The unique identifier of a Amazon GameLift Streams stream group to retrieve the stream session for. You can use either the stream group ID or the Amazon Resource Name (ARN).

        ", + "documentation":"

        The unique identifier of an Amazon GameLift Streams stream group to retrieve the stream session for. You can use either the stream group ID or the Amazon Resource Name (ARN).

        ", "location":"uri", "locationName":"Identifier" }, @@ -1499,7 +1499,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The (Amazon Resource Name (ARN) that you want to retrieve tags for. To get a Amazon GameLift Streams resource ARN, call a List or Get operation for the resource.

        ", + "documentation":"

        The Amazon Resource Name (ARN) that you want to retrieve tags for. To get an Amazon GameLift Streams resource ARN, call a List or Get operation for the resource.

        ", "location":"uri", "locationName":"ResourceArn" } @@ -1520,15 +1520,15 @@ "members":{ "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " + "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " }, "LocationName":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", - "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " + "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " } }, "documentation":"

        Configuration settings that define a stream group's stream capacity for a location. When configuring a location for the first time, you must specify a numeric value for at least one of the two capacity types. To update the capacity for an existing stream group, call UpdateStreamGroup. To add a new location and specify its capacity, call AddStreamGroupLocations.

        " @@ -1559,7 +1559,7 @@ }, "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " + "documentation":"

        The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session.

        " }, "IdleCapacity":{ "shape":"CapacityValue", @@ -1567,11 +1567,11 @@ }, "LocationName":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", - "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " + "documentation":"

        The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes).

        " }, "RequestedCapacity":{ "shape":"CapacityValue", @@ -1625,13 +1625,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        A stream group to remove the specified locations from.

        This value is a Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Format example: sg-1AB2C3De4.

        ", + "documentation":"

        A stream group to remove the specified locations from.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "Locations":{ "shape":"LocationsList", - "documentation":"

        A set of locations to remove this stream group.

        A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        ", + "documentation":"

        A set of locations to remove from this stream group.

        A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        ", "location":"querystring", "locationName":"locations" } @@ -1642,7 +1642,7 @@ "members":{ "Location":{ "shape":"LocationName", - "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Status":{ "shape":"ReplicationStatusType", @@ -1759,7 +1759,7 @@ }, "ApplicationIdentifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        " }, "ClientToken":{ "shape":"ClientToken", @@ -1776,13 +1776,13 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        The stream group to run this stream session with.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        The stream group to run this stream session with.

        This value is an Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, "Locations":{ "shape":"LocationList", - "documentation":"

        A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream from. Amazon GameLift Streams selects the location with the next available capacity to start a single stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in the primary location.

        This value is A set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        A list of locations, in order of priority, where you want Amazon GameLift Streams to start a stream from. Amazon GameLift Streams selects the location with the next available capacity to start a single stream session in. If this value is empty, Amazon GameLift Streams attempts to start a stream session in the primary location.

        This value is a set of location names. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Protocol":{ "shape":"Protocol", @@ -1794,7 +1794,7 @@ }, "SignalRequest":{ "shape":"SignalRequest", - "documentation":"

        A WebRTC ICE offer string to use when initializing a WebRTC connection. The offer is a very long JSON string. Provide the string as a text value in quotes.

        " + "documentation":"

        A WebRTC ICE offer string to use when initializing a WebRTC connection. Typically, the offer is a very long JSON string. Provide the string as a text value in quotes.

        Amazon GameLift Streams also supports setting the field to \"NO_CLIENT_CONNECTION\". This will create a session without needing any browser request or Web SDK integration. The session starts up as usual and waits for a reconnection from a browser, which is accomplished using CreateStreamSessionConnection.

        " }, "UserId":{ "shape":"UserId", @@ -1815,11 +1815,11 @@ }, "ApplicationArn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) assigned to the stream session resource. When combined with the stream group ARN, this value uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to a stream session resource. When combined with the stream group resource ID, this value uniquely identifies the stream session across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamsession/[stream group resource ID]/[stream session resource ID].

        " }, "ConnectionTimeoutSeconds":{ "shape":"ConnectionTimeoutSeconds", @@ -1843,7 +1843,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is streaming your application from.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is streaming your application from.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "LogFileLocationUri":{ "shape":"FileLocationUri", @@ -1875,7 +1875,7 @@ }, "StreamGroupId":{ "shape":"Id", - "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session.

        " + "documentation":"

        The unique identifier for the Amazon GameLift Streams stream group that is hosting the stream session. Format example: sg-1AB2C3De4.

        " }, "UserId":{ "shape":"UserId", @@ -1931,7 +1931,7 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -1947,7 +1947,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        " + "documentation":"

        An ID that uniquely identifies the stream group resource. Example ID: sg-1AB2C3De4.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1995,11 +1995,11 @@ "members":{ "ApplicationArn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "Arn":{ "shape":"Arn", - "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4. .

        " + "documentation":"

        An Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2015,7 +2015,7 @@ }, "Location":{ "shape":"LocationName", - "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions and quotas in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        The location where Amazon GameLift Streams is hosting the stream session.

        A location's name. For example, us-east-1. For a complete list of locations that Amazon GameLift Streams supports, refer to Regions, quotas, and limitations in the Amazon GameLift Streams Developer Guide.

        " }, "Protocol":{ "shape":"Protocol", @@ -2057,7 +2057,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply tags to.

        ", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to apply tags to.

        ", "location":"uri", "locationName":"ResourceArn" }, @@ -2093,13 +2093,13 @@ "members":{ "Identifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        The stream group that runs this stream session.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        The stream group that runs this stream session.

        ", "location":"uri", "locationName":"Identifier" }, "StreamSessionIdentifier":{ "shape":"Identifier", - "documentation":"

        Amazon Resource Name (ARN) that uniquely identifies the stream session resource. Format example: 1AB2C3De4.

        ", + "documentation":"

        Amazon Resource Name (ARN) or ID that uniquely identifies the stream session resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamsession/sg-1AB2C3De4/ABC123def4567. Example ID: ABC123def4567.

        ", "location":"uri", "locationName":"StreamSessionIdentifier" } @@ -2132,7 +2132,7 @@ "members":{ "ResourceArn":{ "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove tags from.

        ", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon GameLift Streams resource that you want to remove tags from.

        ", "location":"uri", "locationName":"ResourceArn" }, @@ -2155,7 +2155,7 @@ "members":{ "ApplicationLogOutputUri":{ "shape":"ApplicationLogOutputUri", - "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Getting Started in the Amazon GameLift Streams Developer Guide.

        " + "documentation":"

        An Amazon S3 URI to a bucket where you would like Amazon GameLift Streams to save application logs. Required if you specify one or more ApplicationLogPaths.

        The log bucket must have permissions that give Amazon GameLift Streams access to write the log files. For more information, see Application log bucket permission policy in the Amazon GameLift Streams Developer Guide.

        " }, "ApplicationLogPaths":{ "shape":"FilePaths", @@ -2167,7 +2167,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6. Example ID: a-9ZY8X7Wv6.

        ", "location":"uri", "locationName":"Identifier" } @@ -2191,11 +2191,11 @@ }, "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that's assigned to an application resource and uniquely identifies it across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:application/[resource ID].

        " }, "AssociatedStreamGroups":{ "shape":"ArnList", - "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4.

        " + "documentation":"

        A set of stream groups that this application is associated with. You can use any of these stream groups to stream your application.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify stream group resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2211,7 +2211,7 @@ }, "Id":{ "shape":"Id", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the application resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6 or ID-a-9ZY8X7Wv6.

        " + "documentation":"

        A unique ID value that is assigned to the resource when it's created. Format example: a-9ZY8X7Wv6.

        " }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -2245,7 +2245,7 @@ }, "Identifier":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Format example: ARN-arn:aws:gameliftstreams:us-west-2:123456789012:streamgroup/sg-1AB2C3De4 or ID-sg-1AB2C3De4.

        ", + "documentation":"

        An Amazon Resource Name (ARN) or ID that uniquely identifies the stream group resource. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:streamgroup/sg-1AB2C3De4. Example ID: sg-1AB2C3De4.

        ", "location":"uri", "locationName":"Identifier" }, @@ -2261,11 +2261,11 @@ "members":{ "Arn":{ "shape":"Identifier", - "documentation":"

        An Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " + "documentation":"

        The Amazon Resource Name (ARN) that is assigned to the stream group resource and that uniquely identifies the group across all Amazon Web Services Regions. Format is arn:aws:gameliftstreams:[AWS Region]:[AWS account]:streamgroup/[resource ID].

        " }, "AssociatedApplications":{ "shape":"ArnList", - "documentation":"

        A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Format example: arn:aws:gameliftstreams:us-west-2:123456789012:application/a-9ZY8X7Wv6.

        " + "documentation":"

        A set of applications that this stream group is associated with. You can stream any of these applications with the stream group.

        This value is a set of Amazon Resource Names (ARNs) that uniquely identify application resources. Example ARN: arn:aws:gameliftstreams:us-west-2:111122223333:application/a-9ZY8X7Wv6.

        " }, "CreatedAt":{ "shape":"Timestamp", @@ -2329,5 +2329,5 @@ }, "WebSdkProtocolUrl":{"type":"string"} }, - "documentation":"

        Amazon GameLift Streams

        Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams tools to upload and configure content for streaming, deploy and scale computing resources to host streams, and manage stream session placement to meet customer demand.

        This Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the Amazon Web Services SDK, the Command Line Interface (AWS CLI), or by making direct REST calls through HTTPS.

        See the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.

        " + "documentation":"

        Amazon GameLift Streams

        Amazon GameLift Streams provides a global cloud solution for content streaming experiences. Use Amazon GameLift Streams tools to upload and configure content for streaming, deploy and scale computing resources to host streams, and manage stream session placement to meet customer demand.

        This Reference Guide describes the Amazon GameLift Streams service API. You can use the API through the Amazon Web Services SDK, the Command Line Interface (CLI), or by making direct REST calls through HTTPS.

        See the Amazon GameLift Streams Developer Guide for more information on how Amazon GameLift Streams works and how to work with it.

        " } diff --git a/services/geomaps/pom.xml b/services/geomaps/pom.xml index 412c6114f353..b9221d2fd66e 100644 --- a/services/geomaps/pom.xml +++ b/services/geomaps/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT geomaps AWS Java SDK :: Services :: Geo Maps diff --git a/services/geomaps/src/main/resources/codegen-resources/customization.config b/services/geomaps/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/geomaps/src/main/resources/codegen-resources/customization.config +++ b/services/geomaps/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/geoplaces/pom.xml b/services/geoplaces/pom.xml index 84084809490e..b0af48a3989f 100644 --- a/services/geoplaces/pom.xml +++ b/services/geoplaces/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT geoplaces AWS Java SDK :: Services :: Geo Places diff --git a/services/geoplaces/src/main/resources/codegen-resources/customization.config b/services/geoplaces/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/geoplaces/src/main/resources/codegen-resources/customization.config +++ b/services/geoplaces/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/geoplaces/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/geoplaces/src/main/resources/codegen-resources/endpoint-rule-set.json index d4ff8f54a267..87654796fef1 100644 --- a/services/geoplaces/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/geoplaces/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -328,7 +328,7 @@ } ], "endpoint": { - "url": "https://places.geo.{Region}.us-gov.{PartitionResult#dnsSuffix}/v2", + "url": 
"https://places.geo.{Region}.{PartitionResult#dnsSuffix}/v2", "properties": {}, "headers": {} }, @@ -371,7 +371,7 @@ } ], "endpoint": { - "url": "https://places.geo-fips.{Region}.us-gov.{PartitionResult#dualStackDnsSuffix}/v2", + "url": "https://places.geo-fips.{Region}.{PartitionResult#dualStackDnsSuffix}/v2", "properties": {}, "headers": {} }, @@ -414,7 +414,7 @@ } ], "endpoint": { - "url": "https://places.geo-fips.{Region}.us-gov.{PartitionResult#dnsSuffix}/v2", + "url": "https://places.geo-fips.{Region}.{PartitionResult#dnsSuffix}/v2", "properties": {}, "headers": {} }, @@ -457,7 +457,7 @@ } ], "endpoint": { - "url": "https://places.geo.{Region}.us-gov.{PartitionResult#dualStackDnsSuffix}/v2", + "url": "https://places.geo.{Region}.{PartitionResult#dualStackDnsSuffix}/v2", "properties": {}, "headers": {} }, diff --git a/services/geoplaces/src/main/resources/codegen-resources/endpoint-tests.json b/services/geoplaces/src/main/resources/codegen-resources/endpoint-tests.json index 7203f1386175..5a692c699b01 100644 --- a/services/geoplaces/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/geoplaces/src/main/resources/codegen-resources/endpoint-tests.json @@ -141,7 +141,7 @@ "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://places.geo-fips.us-gov-west-1.us-gov.api.aws/v2" + "url": "https://places.geo-fips.us-gov-west-1.api.aws/v2" } }, "params": { @@ -154,7 +154,7 @@ "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://places.geo-fips.us-gov-west-1.us-gov.amazonaws.com/v2" + "url": "https://places.geo-fips.us-gov-west-1.amazonaws.com/v2" } }, "params": { @@ -167,7 +167,7 @@ "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://places.geo.us-gov-west-1.us-gov.api.aws/v2" + "url": 
"https://places.geo.us-gov-west-1.api.aws/v2" } }, "params": { @@ -180,7 +180,7 @@ "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://places.geo.us-gov-west-1.us-gov.amazonaws.com/v2" + "url": "https://places.geo.us-gov-west-1.amazonaws.com/v2" } }, "params": { diff --git a/services/geoplaces/src/main/resources/codegen-resources/service-2.json b/services/geoplaces/src/main/resources/codegen-resources/service-2.json index cfd5e79babc9..b71dce1abc44 100644 --- a/services/geoplaces/src/main/resources/codegen-resources/service-2.json +++ b/services/geoplaces/src/main/resources/codegen-resources/service-2.json @@ -28,7 +28,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        The autocomplete operation speeds up and increases the accuracy of entering addresses by providing a list of address candidates matching a partially entered address. Results are sorted from most to least matching. Filtering and biasing can be used to increase the relevance of the results if additional search context is known

        " + "documentation":"

        Autocomplete completes potential places and addresses as the user types, based on the partial input. The API enhances the efficiency and accuracy of address entry by completing queries based on a few entered keystrokes. It helps you by completing partial queries with valid address completion. Also, the API supports the filtering of results based on geographic location, country, or specific place types, and can be tailored using optional parameters like language and political views.

        " }, "Geocode":{ "name":"Geocode", @@ -45,7 +45,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        The Geocode action allows you to obtain coordinates, addresses, and other information about places.

        " + "documentation":"

        Geocode converts a textual address or place into geographic coordinates. You can obtain geographic coordinates, address component, and other related information. It supports flexible queries, including free-form text or structured queries with components like street names, postal codes, and regions. The Geocode API can also provide additional features such as time zone information and the inclusion of political views.

        " }, "GetPlace":{ "name":"GetPlace", @@ -62,7 +62,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Finds a place by its unique ID. A PlaceId is returned by other place operations.

        " + "documentation":"

        GetPlace finds a place by its unique ID. A PlaceId is returned by other place operations.

        " }, "ReverseGeocode":{ "name":"ReverseGeocode", @@ -79,7 +79,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        The ReverseGeocode operation allows you to retrieve addresses and place information from coordinates.

        " + "documentation":"

        ReverseGeocode converts geographic coordinates into a human-readable address or place. You can obtain address components and other related information such as place type, category, and street information. The Reverse Geocode API supports filtering on place type so that you can refine results based on your needs. The Reverse Geocode API can also provide additional features such as time zone information and the inclusion of political views.

        " }, "SearchNearby":{ "name":"SearchNearby", @@ -96,7 +96,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Search nearby a specified location.

        " + "documentation":"

        SearchNearby queries for points of interest within a radius from central coordinates, returning place results with optional filters such as categories, business chains, food types and more. The API returns details such as a place name, address, phone, category, food type, contact, opening hours. Also, the API can return phonemes, time zones and more based on requested parameters.

        " }, "SearchText":{ "name":"SearchText", @@ -113,7 +113,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Use the SearchText operation to search for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id.

        " + "documentation":"

        SearchText searches for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id.

        " }, "Suggest":{ "name":"Suggest", @@ -130,7 +130,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        The Suggest operation finds addresses or place candidates based on incomplete or misspelled queries. You then select the best query to submit based on the returned results.

        " + "documentation":"

        Suggest provides intelligent predictions or recommendations based on the user's input or context, such as relevant places, points of interest, query terms or search category. It is designed to help users find places or point of interests candidates or identify a follow on query based on incomplete or misspelled queries. It returns a list of possible matches or refinements that can be used to formulate a more accurate query. Users can select the most appropriate suggestion and use it for further searching. The API provides options for filtering results by location and other attributes, and allows for additional features like phonemes and timezones. The response includes refined query terms and detailed place information.

        " } }, "shapes":{ @@ -170,7 +170,7 @@ "type":"structure", "members":{ "Restricted":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        The restriction.

        " }, "Categories":{ @@ -207,7 +207,7 @@ }, "Locality":{ "shape":"AddressLocalityString", - "documentation":"

        The locality or city of the address.

        Example: Vancouver.

        " + "documentation":"

        The city or locality of the address.

        Example: Vancouver.

        " }, "District":{ "shape":"AddressDistrictString", @@ -219,7 +219,7 @@ }, "PostalCode":{ "shape":"AddressPostalCodeString", - "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code, for which the result should posses.

        " + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code, for which the result should possess.

        " }, "Block":{ "shape":"AddressBlockString", @@ -230,7 +230,7 @@ "documentation":"

        Name of sub-block.

        Example: Sunny Mansion 203 sub-block: 4

        " }, "Intersection":{ - "shape":"IntersectionList", + "shape":"IntersectionStreetList", "documentation":"

        Name of the streets in the intersection.

        Example: [\"Friedrichstraße\",\"Unter den Linden\"]

        " }, "Street":{ @@ -248,6 +248,10 @@ "Building":{ "shape":"AddressBuildingString", "documentation":"

        The name of the building at the address.

        " + }, + "SecondaryAddressComponents":{ + "shape":"SecondaryAddressComponentList", + "documentation":"

        Components that correspond to secondary identifiers on an Address. Secondary address components include information such as Suite or Unit Number, Building, or Floor.

        " } }, "documentation":"

        The place address.

        " @@ -255,17 +259,20 @@ "AddressAddressNumberString":{ "type":"string", "max":10, - "min":0 + "min":0, + "sensitive":true }, "AddressBlockString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressBuildingString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressComponentMatchScores":{ "type":"structure", @@ -296,7 +303,7 @@ }, "PostalCode":{ "shape":"MatchScore", - "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code, for which the result should posses.

        " + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code, for which the result should possess.

        " }, "Block":{ "shape":"MatchScore", @@ -317,6 +324,10 @@ "Building":{ "shape":"MatchScore", "documentation":"

        The name of the building at the address.

        " + }, + "SecondaryAddressComponents":{ + "shape":"SecondaryAddressComponentMatchScoreList", + "documentation":"

        Match scores for the secondary address components in the result.

        " } }, "documentation":"

        Indicates how well the entire input matches the returned. It is equal to 1 if all input tokens are recognized and matched.

        " @@ -372,37 +383,44 @@ "AddressDistrictString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressLabelString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressLocalityString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressPostalCodeString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "AddressStreetString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressSubBlockString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "AddressSubDistrictString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "ApiKey":{ "type":"string", @@ -457,11 +475,11 @@ }, "Block":{ "shape":"HighlightList", - "documentation":"

        Name of the block. Example: Sunny Mansion 203 block: 2 Chome

        " + "documentation":"

        Name of the block.

        Example: Sunny Mansion 203 block: 2 Chome

        " }, "SubBlock":{ "shape":"HighlightList", - "documentation":"

        Name of sub-block. Example Sunny Mansion 203 sub-block: 4

        " + "documentation":"

        Name of sub-block.

        Example: Sunny Mansion 203 sub-block: 4

        " }, "Intersection":{ "shape":"IntersectionHighlightsList", @@ -469,7 +487,7 @@ }, "PostalCode":{ "shape":"HighlightList", - "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should posses.

        " + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should possess.

        " }, "AddressNumber":{ "shape":"HighlightList", @@ -499,7 +517,7 @@ "documentation":"

        The included place types.

        " } }, - "documentation":"

        Autocomplete structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        Autocomplete structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AutocompleteFilterPlaceType":{ "type":"string", @@ -538,7 +556,7 @@ "members":{ "QueryText":{ "shape":"AutocompleteRequestQueryTextString", - "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        " + "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        The fields QueryText, and QueryID are mutually exclusive.

        " }, "MaxResults":{ "shape":"AutocompleteRequestMaxResultsInteger", @@ -550,11 +568,11 @@ }, "Filter":{ "shape":"AutocompleteFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "PostalCodeMode":{ "shape":"PostalCodeMode", - "documentation":"

        The PostalCodeMode affects how postal code results are returned. If a postal code spans multiple localities and this value is empty, partial district or locality information may be returned under a single postal code result entry. If it's populated with the value cityLookup, all cities in that postal code are returned.

        " + "documentation":"

        The PostalCodeMode affects how postal code results are returned. If a postal code spans multiple localities and this value is empty, partial district or locality information may be returned under a single postal code result entry. If it's populated with the value EnumerateSpannedLocalities, all cities in that postal code are returned.

        " }, "AdditionalFeatures":{ "shape":"AutocompleteAdditionalFeatureList", @@ -566,7 +584,7 @@ }, "PoliticalView":{ "shape":"CountryCode", - "documentation":"

        The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country.

        " + "documentation":"

        The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country.

        The following political views are currently supported:

        • ARG: Argentina's view on the Southern Patagonian Ice Field and Tierra Del Fuego, including the Falkland Islands, South Georgia, and South Sandwich Islands

        • EGY: Egypt's view on Bir Tawil

        • IND: India's view on Gilgit-Baltistan

        • KEN: Kenya's view on the Ilemi Triangle

        • MAR: Morocco's view on Western Sahara

        • RUS: Russia's view on Crimea

        • SDN: Sudan's view on the Halaib Triangle

        • SRB: Serbia's view on Kosovo, Vukovar, and Sarengrad Islands

        • SUR: Suriname's view on the Courantyne Headwaters and Lawa Headwaters

        • SYR: Syria's view on the Golan Heights

        • TUR: Turkey's view on Cyprus and Northern Cyprus

        • TZA: Tanzania's view on Lake Malawi

        • URY: Uruguay's view on Rincon de Artigas

        • VNM: Vietnam's view on the Paracel Islands and Spratly Islands

        " }, "IntendedUse":{ "shape":"AutocompleteIntendedUse", @@ -598,7 +616,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -659,17 +677,15 @@ }, "AutocompleteResultItemPlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "AutocompleteResultItemTitleString":{ "type":"string", "max":200, - "min":0 - }, - "Boolean":{ - "type":"boolean", - "box":true + "min":0, + "sensitive":true }, "BoundingBox":{ "type":"list", @@ -695,7 +711,8 @@ "BusinessChainIdString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "BusinessChainList":{ "type":"list", @@ -706,7 +723,8 @@ "BusinessChainNameString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "Category":{ "type":"structure", @@ -728,7 +746,7 @@ "documentation":"

        Localized name of the category type.

        " }, "Primary":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean which indicates if this category is the primary offered by the place.

        " } }, @@ -737,7 +755,8 @@ "CategoryIdString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "CategoryList":{ "type":"list", @@ -748,26 +767,28 @@ "CategoryLocalizedNameString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "CategoryNameString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "ComponentMatchScores":{ "type":"structure", "members":{ "Title":{ "shape":"MatchScore", - "documentation":"

        Indicates the starting and ending index of the title in the text query that match the found title.

        " + "documentation":"

        Indicates the match score of the title in the text query that match the found title.

        " }, "Address":{ "shape":"AddressComponentMatchScores", "documentation":"

        The place's address.

        " } }, - "documentation":"

        Indicates how well the input matches the returned element. It is equal to 1 if all input tokens are recognized and matched to the title in the result.

        " + "documentation":"

        Indicates how well the returned title and address components match the input TextQuery. For each component a score is provided with 1 indicating all tokens were matched and 0 indicating no tokens were matched.

        " }, "ContactDetails":{ "type":"structure", @@ -790,7 +811,8 @@ "ContactDetailsLabelString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "ContactDetailsList":{ "type":"list", @@ -801,7 +823,8 @@ "ContactDetailsValueString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "Contacts":{ "type":"structure", @@ -847,19 +870,22 @@ "type":"string", "max":3, "min":2, - "pattern":"([A-Z]{2}|[A-Z]{3})" + "pattern":"([A-Z]{2}|[A-Z]{3})", + "sensitive":true }, "CountryCode2":{ "type":"string", "max":2, "min":2, - "pattern":"[A-Z]{2}" + "pattern":"[A-Z]{2}", + "sensitive":true }, "CountryCode3":{ "type":"string", "max":3, "min":3, - "pattern":"[A-Z]{3}" + "pattern":"[A-Z]{3}", + "sensitive":true }, "CountryCodeList":{ "type":"list", @@ -884,12 +910,14 @@ "CountryNameString":{ "type":"string", "max":100, - "min":0 + "min":0, + "sensitive":true }, "DistanceMeters":{ "type":"long", "max":4294967295, - "min":0 + "min":0, + "sensitive":true }, "Double":{ "type":"double", @@ -904,7 +932,8 @@ "FilterBusinessChainListMemberString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "FilterCategoryList":{ "type":"list", @@ -915,7 +944,8 @@ "FilterCategoryListMemberString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "FilterCircle":{ "type":"structure", @@ -940,7 +970,8 @@ "FilterCircleRadiusLong":{ "type":"long", "max":21000000, - "min":1 + "min":1, + "sensitive":true }, "FilterFoodTypeList":{ "type":"list", @@ -951,7 +982,8 @@ "FilterFoodTypeListMemberString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "FoodType":{ "type":"structure", @@ -966,7 +998,7 @@ "documentation":"

        The Food Type Id.

        " }, "Primary":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean which indicates if this food type is the primary offered by the place. For example, if a location serves fast food, but also dessert, the primary would likely be fast food.

        " } }, @@ -975,7 +1007,8 @@ "FoodTypeIdString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "FoodTypeList":{ "type":"list", @@ -986,13 +1019,16 @@ "FoodTypeLocalizedNameString":{ "type":"string", "max":100, - "min":1 + "min":1, + "sensitive":true }, "GeocodeAdditionalFeature":{ "type":"string", "enum":[ "TimeZone", - "Access" + "Access", + "SecondaryAddresses", + "Intersections" ] }, "GeocodeAdditionalFeatureList":{ @@ -1013,7 +1049,7 @@ "documentation":"

        The included place types.

        " } }, - "documentation":"

        Geocode structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        Geocode structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "GeocodeFilterPlaceType":{ "type":"string", @@ -1024,7 +1060,8 @@ "Street", "PointAddress", "InterpolatedAddress" - ] + ], + "sensitive":true }, "GeocodeFilterPlaceTypeList":{ "type":"list", @@ -1039,6 +1076,78 @@ "Storage" ] }, + "GeocodeParsedQuery":{ + "type":"structure", + "members":{ + "Title":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The localized display name of this result item based on request parameter language.

        " + }, + "Address":{ + "shape":"GeocodeParsedQueryAddressComponents", + "documentation":"

        The place address.

        " + } + }, + "documentation":"

        Parsed components in the provided QueryText.

        " + }, + "GeocodeParsedQueryAddressComponents":{ + "type":"structure", + "members":{ + "Country":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The alpha-2 or alpha-3 character code for the country that the results will be present in.

        " + }, + "Region":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The region or state results should be present in.

        Example: North Rhine-Westphalia.

        " + }, + "SubRegion":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The sub-region or county for which results should be present in.

        " + }, + "Locality":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The city or locality of the address.

        Example: Vancouver.

        " + }, + "District":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The district or division of a city the results should be present in.

        " + }, + "SubDistrict":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        A subdivision of a district.

        Example: Minden-Lübbecke.

        " + }, + "PostalCode":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code, for which the result should possess.

        " + }, + "Block":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        Name of the block.

        Example: Sunny Mansion 203 block: 2 Chome

        " + }, + "SubBlock":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        Name of sub-block.

        Example: Sunny Mansion 203 sub-block: 4

        " + }, + "Street":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The name of the street results should be present in.

        " + }, + "AddressNumber":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The number that identifies an address within a street.

        " + }, + "Building":{ + "shape":"ParsedQueryComponentList", + "documentation":"

        The name of the building at the address.

        " + }, + "SecondaryAddressComponents":{ + "shape":"ParsedQuerySecondaryAddressComponentList", + "documentation":"

        Parsed secondary address components from the provided query text.

        " + } + }, + "documentation":"

        Parsed address components in the provided QueryText.

        " + }, "GeocodeQueryComponents":{ "type":"structure", "members":{ @@ -1056,7 +1165,7 @@ }, "Locality":{ "shape":"GeocodeQueryComponentsLocalityString", - "documentation":"

        City or locality results should be present in.

        Example: Vancouver.

        " + "documentation":"

        The city or locality results should be present in.

        Example: Vancouver.

        " }, "District":{ "shape":"GeocodeQueryComponentsDistrictString", @@ -1072,7 +1181,7 @@ }, "PostalCode":{ "shape":"GeocodeQueryComponentsPostalCodeString", - "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should posses.

        " + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should possess.

        " } }, "documentation":"

        A structured free text query allows you to search for places by the name or text representation of specific properties of the place.

        " @@ -1138,7 +1247,7 @@ "members":{ "QueryText":{ "shape":"GeocodeRequestQueryTextString", - "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        " + "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        The fields QueryText, and QueryID are mutually exclusive.

        " }, "QueryComponents":{"shape":"GeocodeQueryComponents"}, "MaxResults":{ @@ -1151,7 +1260,7 @@ }, "Filter":{ "shape":"GeocodeFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AdditionalFeatures":{ "shape":"GeocodeAdditionalFeatureList", @@ -1167,7 +1276,7 @@ }, "IntendedUse":{ "shape":"GeocodeIntendedUse", - "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        " + "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        Storing the response of a Geocode query is required to comply with service terms, but charged at a higher cost per request. Please review the user agreement and service pricing structure to determine the correct setting for your use case.

        " }, "Key":{ "shape":"ApiKey", @@ -1195,7 +1304,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -1215,7 +1324,7 @@ "members":{ "PlaceId":{ "shape":"GeocodeResultItemPlaceIdString", - "documentation":"

        The PlaceId of the place you wish to receive the information for.

        " + "documentation":"

        The PlaceId of the place result.

        " }, "PlaceType":{ "shape":"PlaceType", @@ -1230,7 +1339,7 @@ "documentation":"

        The place's address.

        " }, "AddressNumberCorrected":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean indicating if the address provided has been corrected.

        " }, "PostalCodeDetails":{ @@ -1259,7 +1368,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

        Position of the access point represent by longitude and latitude.

        " + "documentation":"

        Position of the access point represented by longitude and latitude.

        " }, "TimeZone":{ "shape":"TimeZone", @@ -1272,6 +1381,22 @@ "MatchScores":{ "shape":"MatchScoreDetails", "documentation":"

        Indicates how well the entire input matches the returned. It is equal to 1 if all input tokens are recognized and matched.

        " + }, + "ParsedQuery":{ + "shape":"GeocodeParsedQuery", + "documentation":"

        Free-form text query.

        " + }, + "Intersections":{ + "shape":"IntersectionList", + "documentation":"

        All Intersections that are near the provided address.

        " + }, + "MainAddress":{ + "shape":"RelatedPlace", + "documentation":"

        The main address corresponding to a place of type Secondary Address.

        " + }, + "SecondaryAddresses":{ + "shape":"RelatedPlaceList", + "documentation":"

        All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

        " } }, "documentation":"

        The Geocoded result.

        " @@ -1284,13 +1409,15 @@ }, "GeocodeResultItemPlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "GeocodeResultItemTitleString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "GetPlaceAdditionalFeature":{ "type":"string", @@ -1298,7 +1425,8 @@ "TimeZone", "Phonemes", "Access", - "Contact" + "Contact", + "SecondaryAddresses" ] }, "GetPlaceAdditionalFeatureList":{ @@ -1344,7 +1472,7 @@ }, "IntendedUse":{ "shape":"GetPlaceIntendedUse", - "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        ", + "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        Storing the response of a GetPlace query is required to comply with service terms, but charged at a higher cost per request. Please review the user agreement and service pricing structure to determine the correct setting for your use case.

        ", "location":"querystring", "locationName":"intended-use" }, @@ -1358,7 +1486,7 @@ }, "GetPlaceRequestPlaceIdString":{ "type":"string", - "max":200, + "max":500, "min":0, "sensitive":true }, @@ -1381,11 +1509,11 @@ }, "Title":{ "shape":"GetPlaceResponseTitleString", - "documentation":"

        The localized display name of this result item based on request parameter language.

        " + "documentation":"

        The localized display name of this result item based on request parameter language.

        " }, "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -1394,7 +1522,7 @@ "documentation":"

        The place's address.

        " }, "AddressNumberCorrected":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean indicating if the address provided has been corrected.

        " }, "PostalCodeDetails":{ @@ -1448,18 +1576,28 @@ "Phonemes":{ "shape":"PhonemeDetails", "documentation":"

        How the various components of the result's address are pronounced in various languages.

        " + }, + "MainAddress":{ + "shape":"RelatedPlace", + "documentation":"

        The main address corresponding to a place of type Secondary Address.

        " + }, + "SecondaryAddresses":{ + "shape":"RelatedPlaceList", + "documentation":"

        All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

        " } } }, "GetPlaceResponsePlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "GetPlaceResponseTitleString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "Highlight":{ "type":"structure", @@ -1477,7 +1615,7 @@ "documentation":"

        The highlight's value.

        " } }, - "documentation":"

        Describes how parts of the result response match the input query.

        " + "documentation":"

        Indicates the starting and ending index of the text query that match the found title.

        " }, "HighlightEndIndexInteger":{ "type":"integer", @@ -1498,7 +1636,8 @@ "HighlightValueString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "InternalServerException":{ "type":"structure", @@ -1515,6 +1654,47 @@ "fault":true, "retryable":{"throttling":false} }, + "Intersection":{ + "type":"structure", + "required":[ + "PlaceId", + "Title" + ], + "members":{ + "PlaceId":{ + "shape":"IntersectionPlaceIdString", + "documentation":"

        The PlaceId of the place result.

        " + }, + "Title":{ + "shape":"IntersectionTitleString", + "documentation":"

        The localized display name of this result item based on request parameter language.

        " + }, + "Address":{"shape":"Address"}, + "Position":{ + "shape":"Position", + "documentation":"

        The position, in longitude and latitude.

        " + }, + "Distance":{ + "shape":"DistanceMeters", + "documentation":"

        The distance in meters from the QueryPosition.

        ", + "box":true + }, + "RouteDistance":{ + "shape":"DistanceMeters", + "documentation":"

        The distance from the routing position of the nearby address to the street result.

        ", + "box":true + }, + "MapView":{ + "shape":"BoundingBox", + "documentation":"

        The bounding box enclosing the geometric shape (area or line) that an individual result covers.

        The bounding box formed is defined as a set of four coordinates: [{westward lng}, {southern lat}, {eastward lng}, {northern lat}]

        " + }, + "AccessPoints":{ + "shape":"AccessPointList", + "documentation":"

        Position of the access point represented by longitude and latitude.

        " + } + }, + "documentation":"

        All Intersections that are near the provided address.

        " + }, "IntersectionHighlightsList":{ "type":"list", "member":{"shape":"HighlightList"}, @@ -1523,15 +1703,32 @@ }, "IntersectionList":{ "type":"list", - "member":{"shape":"IntersectionStreet"}, - "max":100, + "member":{"shape":"Intersection"}, "min":1 }, + "IntersectionPlaceIdString":{ + "type":"string", + "max":500, + "min":0, + "sensitive":true + }, "IntersectionStreet":{ "type":"string", "max":200, "min":0 }, + "IntersectionStreetList":{ + "type":"list", + "member":{"shape":"IntersectionStreet"}, + "max":100, + "min":1 + }, + "IntersectionTitleString":{ + "type":"string", + "max":200, + "min":0, + "sensitive":true + }, "LanguageTag":{ "type":"string", "max":35, @@ -1564,7 +1761,7 @@ "documentation":"

        List of opening hours in the format they are displayed in. This can vary by result and in most cases represents how the result uniquely formats their opening hours.

        " }, "OpenNow":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean which indicates if the result/place is currently open.

        " }, "Components":{ @@ -1605,22 +1802,26 @@ "OpeningHoursComponentsOpenDurationString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "OpeningHoursComponentsOpenTimeString":{ "type":"string", "max":21, - "min":0 + "min":0, + "sensitive":true }, "OpeningHoursComponentsRecurrenceString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "OpeningHoursDisplay":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "OpeningHoursDisplayList":{ "type":"list", @@ -1634,6 +1835,123 @@ "max":100, "min":1 }, + "ParsedQueryComponent":{ + "type":"structure", + "members":{ + "StartIndex":{ + "shape":"ParsedQueryComponentStartIndexInteger", + "documentation":"

        Start index of the parsed query component.

        " + }, + "EndIndex":{ + "shape":"ParsedQueryComponentEndIndexInteger", + "documentation":"

        End index of the parsed query component.

        " + }, + "Value":{ + "shape":"ParsedQueryComponentValueString", + "documentation":"

        Value of the parsed query component.

        " + }, + "QueryComponent":{ + "shape":"ParsedQueryComponentQueryComponentString", + "documentation":"

        The address component that the parsed query component corresponds to.

        " + } + }, + "documentation":"

        Parsed components in the provided QueryText.

        " + }, + "ParsedQueryComponentEndIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "ParsedQueryComponentList":{ + "type":"list", + "member":{"shape":"ParsedQueryComponent"}, + "max":200, + "min":0 + }, + "ParsedQueryComponentQueryComponentString":{ + "type":"string", + "max":11, + "min":0, + "sensitive":true + }, + "ParsedQueryComponentStartIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "ParsedQueryComponentValueString":{ + "type":"string", + "max":200, + "min":0, + "sensitive":true + }, + "ParsedQuerySecondaryAddressComponent":{ + "type":"structure", + "required":[ + "StartIndex", + "EndIndex", + "Value", + "Number", + "Designator" + ], + "members":{ + "StartIndex":{ + "shape":"ParsedQuerySecondaryAddressComponentStartIndexInteger", + "documentation":"

        Start index of the parsed secondary address component in the query text.

        " + }, + "EndIndex":{ + "shape":"ParsedQuerySecondaryAddressComponentEndIndexInteger", + "documentation":"

        End index of the parsed secondary address component in the query text.

        " + }, + "Value":{ + "shape":"ParsedQuerySecondaryAddressComponentValueString", + "documentation":"

        Value of the parsed secondary address component.

        " + }, + "Number":{ + "shape":"ParsedQuerySecondaryAddressComponentNumberString", + "documentation":"

        Secondary address number provided in the query.

        " + }, + "Designator":{ + "shape":"ParsedQuerySecondaryAddressComponentDesignatorString", + "documentation":"

        Secondary address designator provided in the query.

        " + } + }, + "documentation":"

        Information about a secondary address component parsed from the query text.

        " + }, + "ParsedQuerySecondaryAddressComponentDesignatorString":{ + "type":"string", + "max":4, + "min":0, + "sensitive":true + }, + "ParsedQuerySecondaryAddressComponentEndIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "ParsedQuerySecondaryAddressComponentList":{ + "type":"list", + "member":{"shape":"ParsedQuerySecondaryAddressComponent"}, + "max":200, + "min":0 + }, + "ParsedQuerySecondaryAddressComponentNumberString":{ + "type":"string", + "max":10, + "min":0, + "sensitive":true + }, + "ParsedQuerySecondaryAddressComponentStartIndexInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "ParsedQuerySecondaryAddressComponentValueString":{ + "type":"string", + "max":200, + "min":0, + "sensitive":true + }, "PhonemeDetails":{ "type":"structure", "members":{ @@ -1660,7 +1978,7 @@ "documentation":"

        A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry.

        " }, "Preferred":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean which indicates if it the preferred pronunciation.

        " } }, @@ -1675,7 +1993,8 @@ "PhonemeTranscriptionValueString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "PlaceType":{ "type":"string", @@ -1693,8 +2012,10 @@ "Street", "PointOfInterest", "PointAddress", - "InterpolatedAddress" - ] + "InterpolatedAddress", + "SecondaryAddress" + ], + "sensitive":true }, "Position":{ "type":"list", @@ -1705,14 +2026,15 @@ }, "PostalAuthority":{ "type":"string", - "enum":["Usps"] + "enum":["Usps"], + "sensitive":true }, "PostalCodeDetails":{ "type":"structure", "members":{ "PostalCode":{ "shape":"PostalCodeDetailsPostalCodeString", - "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should posses.

        " + "documentation":"

        An alphanumeric string included in a postal address to facilitate mail sorting, such as post code, postcode, or ZIP code for which the result should possess.

        " }, "PostalAuthority":{ "shape":"PostalAuthority", @@ -1742,7 +2064,8 @@ "PostalCodeDetailsPostalCodeString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "PostalCodeMode":{ "type":"string", @@ -1756,7 +2079,8 @@ "enum":[ "UspsZip", "UspsZipPlus4" - ] + ], + "sensitive":true }, "QueryRefinement":{ "type":"structure", @@ -1800,12 +2124,14 @@ "QueryRefinementOriginalTermString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "QueryRefinementRefinedTermString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "QueryRefinementStartIndexInteger":{ "type":"integer", @@ -1828,7 +2154,8 @@ "PostOfficeBox", "Rural", "Street" - ] + ], + "sensitive":true }, "Region":{ "type":"structure", @@ -1847,7 +2174,8 @@ "RegionCodeString":{ "type":"string", "max":3, - "min":0 + "min":0, + "sensitive":true }, "RegionHighlights":{ "type":"structure", @@ -1866,13 +2194,64 @@ "RegionNameString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true + }, + "RelatedPlace":{ + "type":"structure", + "required":[ + "PlaceId", + "PlaceType", + "Title" + ], + "members":{ + "PlaceId":{ + "shape":"RelatedPlacePlaceIdString", + "documentation":"

        The PlaceId of the place result.

        " + }, + "PlaceType":{ + "shape":"PlaceType", + "documentation":"

        A PlaceType is a category that the result place must belong to.

        " + }, + "Title":{ + "shape":"RelatedPlaceTitleString", + "documentation":"

        The localized display name of this result item based on request parameter language.

        " + }, + "Address":{"shape":"Address"}, + "Position":{ + "shape":"Position", + "documentation":"

        The position, in longitude and latitude.

        " + }, + "AccessPoints":{ + "shape":"AccessPointList", + "documentation":"

        Position of the access point represented by longitude and latitude.

        " + } + }, + "documentation":"

        Place that is related to the result item.

        " + }, + "RelatedPlaceList":{ + "type":"list", + "member":{"shape":"RelatedPlace"}, + "min":1 + }, + "RelatedPlacePlaceIdString":{ + "type":"string", + "max":500, + "min":0, + "sensitive":true + }, + "RelatedPlaceTitleString":{ + "type":"string", + "max":200, + "min":0, + "sensitive":true }, "ReverseGeocodeAdditionalFeature":{ "type":"string", "enum":[ "TimeZone", - "Access" + "Access", + "Intersections" ] }, "ReverseGeocodeAdditionalFeatureList":{ @@ -1920,7 +2299,7 @@ "members":{ "QueryPosition":{ "shape":"Position", - "documentation":"

        The position, in [lng, lat] for which you are querying nearby resultsfor. Results closer to the position will be ranked higher then results further away from the position

        " + "documentation":"

        The position, in [lng, lat], for which you are querying nearby results. Results closer to the position will be ranked higher than results further away from the position.

        " }, "QueryRadius":{ "shape":"ReverseGeocodeRequestQueryRadiusLong", @@ -1933,7 +2312,7 @@ }, "Filter":{ "shape":"ReverseGeocodeFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AdditionalFeatures":{ "shape":"ReverseGeocodeAdditionalFeatureList", @@ -1949,7 +2328,7 @@ }, "IntendedUse":{ "shape":"ReverseGeocodeIntendedUse", - "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        " + "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        Storing the response of a ReverseGeocode query is required to comply with service terms, but charged at a higher cost per request. Please review the user agreement and service pricing structure to determine the correct setting for your use case.

        " }, "Key":{ "shape":"ApiKey", @@ -1968,7 +2347,8 @@ "ReverseGeocodeRequestQueryRadiusLong":{ "type":"long", "max":21000000, - "min":1 + "min":1, + "sensitive":true }, "ReverseGeocodeResponse":{ "type":"structure", @@ -1976,7 +2356,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -2011,7 +2391,7 @@ "documentation":"

        The place's address.

        " }, "AddressNumberCorrected":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean indicating if the address provided has been corrected.

        " }, "PostalCodeDetails":{ @@ -2040,7 +2420,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

        Position of the access point represent by longitude and latitude.

        " + "documentation":"

        Position of the access point represented by longitude and latitude.

        " }, "TimeZone":{ "shape":"TimeZone", @@ -2049,6 +2429,10 @@ "PoliticalView":{ "shape":"CountryCode3", "documentation":"

        The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country.

        " + }, + "Intersections":{ + "shape":"IntersectionList", + "documentation":"

        All Intersections that are near the provided address.

        " } }, "documentation":"

        The returned location from the Reverse Geocode action.

        " @@ -2061,13 +2445,15 @@ }, "ReverseGeocodeResultItemPlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "ReverseGeocodeResultItemTitleString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "SearchNearbyAdditionalFeature":{ "type":"string", @@ -2120,7 +2506,7 @@ "documentation":"

        Food types that results are excluded from.

        " } }, - "documentation":"

        SearchNearby structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        SearchNearby structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "SearchNearbyIntendedUse":{ "type":"string", @@ -2135,11 +2521,11 @@ "members":{ "QueryPosition":{ "shape":"Position", - "documentation":"

        The position, in [lng, lat] for which you are querying nearby resultsfor. Results closer to the position will be ranked higher then results further away from the position

        " + "documentation":"

        The position, in [lng, lat], for which you are querying nearby results. Results closer to the position will be ranked higher than results further away from the position.

        " }, "QueryRadius":{ "shape":"SearchNearbyRequestQueryRadiusLong", - "documentation":"

        The maximum distance in meters from the QueryPosition from which a result will be returned.

        ", + "documentation":"

        The maximum distance in meters from the QueryPosition from which a result will be returned.

        The fields QueryText, and QueryID are mutually exclusive.

        ", "box":true }, "MaxResults":{ @@ -2148,7 +2534,7 @@ }, "Filter":{ "shape":"SearchNearbyFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AdditionalFeatures":{ "shape":"SearchNearbyAdditionalFeatureList", @@ -2164,7 +2550,7 @@ }, "IntendedUse":{ "shape":"SearchNearbyIntendedUse", - "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        " + "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        Storing the response of a SearchNearby query is required to comply with service terms, but charged at a higher cost per request. Please review the user agreement and service pricing structure to determine the correct setting for your use case.

        " }, "NextToken":{ "shape":"Token", @@ -2187,7 +2573,8 @@ "SearchNearbyRequestQueryRadiusLong":{ "type":"long", "max":21000000, - "min":1 + "min":1, + "sensitive":true }, "SearchNearbyResponse":{ "type":"structure", @@ -2195,7 +2582,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -2234,7 +2621,7 @@ "documentation":"

        The place's address.

        " }, "AddressNumberCorrected":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean indicating if the address provided has been corrected.

        " }, "Position":{ @@ -2300,13 +2687,15 @@ }, "SearchNearbyResultItemPlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "SearchNearbyResultItemTitleString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "SearchTextAdditionalFeature":{ "type":"string", @@ -2336,7 +2725,7 @@ "documentation":"

        A list of countries that all results must be in. Countries are represented by either their alpha-2 or alpha-3 character codes.

        " } }, - "documentation":"

        SearchText structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        SearchText structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "SearchTextIntendedUse":{ "type":"string", @@ -2350,11 +2739,11 @@ "members":{ "QueryText":{ "shape":"SearchTextRequestQueryTextString", - "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        " + "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        The fields QueryText, and QueryID are mutually exclusive.

        " }, "QueryId":{ "shape":"SearchTextRequestQueryIdString", - "documentation":"

        The query Id.

        " + "documentation":"

        The query Id returned by the suggest API. If passed in the request, the SearchText API will perform a SearchText query with the improved query terms for the original query made to the suggest API.

        The fields QueryText, and QueryID are mutually exclusive.

        " }, "MaxResults":{ "shape":"SearchTextRequestMaxResultsInteger", @@ -2366,7 +2755,7 @@ }, "Filter":{ "shape":"SearchTextFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AdditionalFeatures":{ "shape":"SearchTextAdditionalFeatureList", @@ -2382,7 +2771,7 @@ }, "IntendedUse":{ "shape":"SearchTextIntendedUse", - "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        " + "documentation":"

        Indicates if the results will be stored. Defaults to SingleUse, if left empty.

        Storing the response of a SearchText query is required to comply with service terms, but charged at a higher cost per request. Please review the user agreement and service pricing structure to determine the correct setting for your use case.

        " }, "NextToken":{ "shape":"Token", @@ -2404,7 +2793,7 @@ }, "SearchTextRequestQueryIdString":{ "type":"string", - "max":400, + "max":500, "min":1, "sensitive":true }, @@ -2420,7 +2809,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -2459,7 +2848,7 @@ "documentation":"

        The place's address.

        " }, "AddressNumberCorrected":{ - "shape":"Boolean", + "shape":"SensitiveBoolean", "documentation":"

        Boolean indicating if the address provided has been corrected.

        " }, "Position":{ @@ -2525,24 +2914,68 @@ }, "SearchTextResultItemPlaceIdString":{ "type":"string", - "max":200, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "SearchTextResultItemTitleString":{ "type":"string", "max":200, + "min":0, + "sensitive":true + }, + "SecondaryAddressComponent":{ + "type":"structure", + "required":["Number"], + "members":{ + "Number":{ + "shape":"SecondaryAddressComponentNumberString", + "documentation":"

        Number that uniquely identifies a secondary address.

        " + } + }, + "documentation":"

        Components that correspond to secondary identifiers on an address. The only component type supported currently is Unit.

        " + }, + "SecondaryAddressComponentList":{ + "type":"list", + "member":{"shape":"SecondaryAddressComponent"}, + "max":1, "min":0 }, + "SecondaryAddressComponentMatchScore":{ + "type":"structure", + "members":{ + "Number":{ + "shape":"MatchScore", + "documentation":"

        Match score for the secondary address number.

        " + } + }, + "documentation":"

        Match score for a secondary address component in the result.

        " + }, + "SecondaryAddressComponentMatchScoreList":{ + "type":"list", + "member":{"shape":"SecondaryAddressComponentMatchScore"} + }, + "SecondaryAddressComponentNumberString":{ + "type":"string", + "max":10, + "min":0, + "sensitive":true + }, + "SensitiveBoolean":{ + "type":"boolean", + "box":true, + "sensitive":true + }, "StreetComponents":{ "type":"structure", "members":{ "BaseName":{ "shape":"StreetComponentsBaseNameString", - "documentation":"

        Base name part of the street name.

        Example: Younge from the “Younge street\".

        " + "documentation":"

        Base name part of the street name.

        Example: Younge from the \"Younge street\".

        " }, "Type":{ "shape":"StreetComponentsTypeString", - "documentation":"

        Street type part of the street name.

        Example: “avenue\".

        " + "documentation":"

        Street type part of the street name.

        Example: \"avenue\".

        " }, "TypePlacement":{ "shape":"TypePlacement", @@ -2550,7 +2983,7 @@ }, "TypeSeparator":{ "shape":"TypeSeparator", - "documentation":"

        What character(s) separates the string from its type.

        " + "documentation":"

        Defines a separator character such as \"\" or \" \" between the base name and type.

        " }, "Prefix":{ "shape":"StreetComponentsPrefixString", @@ -2574,12 +3007,14 @@ "StreetComponentsBaseNameString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "StreetComponentsDirectionString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "StreetComponentsList":{ "type":"list", @@ -2590,17 +3025,20 @@ "StreetComponentsPrefixString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "StreetComponentsSuffixString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "StreetComponentsTypeString":{ "type":"string", "max":50, - "min":0 + "min":0, + "sensitive":true }, "String":{"type":"string"}, "SubRegion":{ @@ -2620,7 +3058,8 @@ "SubRegionCodeString":{ "type":"string", "max":3, - "min":0 + "min":0, + "sensitive":true }, "SubRegionHighlights":{ "type":"structure", @@ -2639,7 +3078,8 @@ "SubRegionNameString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "SuggestAdditionalFeature":{ "type":"string", @@ -2679,7 +3119,7 @@ "documentation":"

        A list of countries that all results must be in. Countries are represented by either their alpha-2 or alpha-3 character codes.

        " } }, - "documentation":"

        SuggestFilter structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        SuggestFilter structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "SuggestHighlights":{ "type":"structure", @@ -2763,27 +3203,29 @@ }, "SuggestPlaceResultPlaceIdString":{ "type":"string", - "max":200, - "min":1 + "max":500, + "min":1, + "sensitive":true }, "SuggestQueryResult":{ "type":"structure", "members":{ "QueryId":{ "shape":"SuggestQueryResultQueryIdString", - "documentation":"

        QueryId can be used to complete a follow up query through the SearchText API. The QueryId retains context from the original Suggest request such as filters, political view and language. See the SearchText API documentation for more details SearchText API docs.

        " + "documentation":"

        QueryId can be used to complete a follow up query through the SearchText API. The QueryId retains context from the original Suggest request such as filters, political view and language. See the SearchText API documentation for more details SearchText API docs.

        The fields QueryText and QueryId are mutually exclusive.

        " }, "QueryType":{ "shape":"QueryType", - "documentation":"

        The query type. Category qeuries will search for places which have an entry matching the given category, for example \"doctor office\". BusinessChain queries will search for instances of a given business.

        " + "documentation":"

        The query type. Category queries will search for places which have an entry matching the given category, for example \"doctor office\". BusinessChain queries will search for instances of a given business.

        " } }, "documentation":"

        The suggested query results.

        " }, "SuggestQueryResultQueryIdString":{ "type":"string", - "max":400, - "min":0 + "max":500, + "min":0, + "sensitive":true }, "SuggestRequest":{ "type":"structure", @@ -2791,7 +3233,7 @@ "members":{ "QueryText":{ "shape":"SuggestRequestQueryTextString", - "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        " + "documentation":"

        The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

        The fields QueryText and QueryId are mutually exclusive.

        " }, "MaxResults":{ "shape":"SuggestRequestMaxResultsInteger", @@ -2807,7 +3249,7 @@ }, "Filter":{ "shape":"SuggestFilter", - "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result.

        " + "documentation":"

        A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result.

        " }, "AdditionalFeatures":{ "shape":"SuggestAdditionalFeatureList", @@ -2857,7 +3299,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

        The pricing bucket for which the query is charged at.

        For more inforamtion on pricing, please visit Amazon Location Service Pricing.

        ", + "documentation":"

        The pricing bucket for which the query is charged at.

        For more information on pricing, please visit Amazon Location Service Pricing.

        ", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -2907,7 +3349,8 @@ "SuggestResultItemTitleString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "SuggestResultItemType":{ "type":"string", @@ -2955,16 +3398,19 @@ "TimeZoneNameString":{ "type":"string", "max":200, - "min":0 + "min":0, + "sensitive":true }, "TimeZoneOffsetSecondsLong":{ "type":"long", - "min":0 + "min":0, + "sensitive":true }, "TimeZoneOffsetString":{ "type":"string", "max":6, - "min":0 + "min":0, + "sensitive":true }, "Token":{ "type":"string", @@ -3075,7 +3521,8 @@ "Military", "PostOfficeBoxes", "Unique" - ] + ], + "sensitive":true } }, "documentation":"

        The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. Key features include:

        • Forward and reverse geocoding for addresses and coordinates

        • Comprehensive place searches with detailed information, including:

          • Business names and addresses

          • Contact information

          • Hours of operation

          • POI (Points of Interest) categories

          • Food types for restaurants

          • Chain affiliation for relevant businesses

        • Global data coverage with a wide range of POI categories

        • Regular data updates to ensure accuracy and relevance

        " diff --git a/services/georoutes/pom.xml b/services/georoutes/pom.xml index f569b19eb27e..de1beafa7dbb 100644 --- a/services/georoutes/pom.xml +++ b/services/georoutes/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT georoutes AWS Java SDK :: Services :: Geo Routes diff --git a/services/georoutes/src/main/resources/codegen-resources/customization.config b/services/georoutes/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/georoutes/src/main/resources/codegen-resources/customization.config +++ b/services/georoutes/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/glacier/pom.xml b/services/glacier/pom.xml index 4c65b1d27a15..14cc334b30ed 100644 --- a/services/glacier/pom.xml +++ b/services/glacier/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT glacier AWS Java SDK :: Services :: Amazon Glacier diff --git a/services/glacier/src/main/resources/codegen-resources/customization.config b/services/glacier/src/main/resources/codegen-resources/customization.config index 5f7bd3ca1c7e..599e24d4dcc5 100644 --- a/services/glacier/src/main/resources/codegen-resources/customization.config +++ b/services/glacier/src/main/resources/codegen-resources/customization.config @@ -27,6 +27,5 @@ "interceptors": [ "software.amazon.awssdk.services.glacier.internal.AcceptJsonInterceptor", "software.amazon.awssdk.services.glacier.internal.GlacierExecutionInterceptor" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/globalaccelerator/pom.xml b/services/globalaccelerator/pom.xml index 7b28e9dbfb11..38c9cfa72f4d 100644 --- a/services/globalaccelerator/pom.xml +++ b/services/globalaccelerator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT globalaccelerator AWS Java SDK :: Services :: 
Global Accelerator diff --git a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config index a68860e2f24b..980868643100 100644 --- a/services/globalaccelerator/src/main/resources/codegen-resources/customization.config +++ b/services/globalaccelerator/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "excludedSimpleMethods": [ "describeAcceleratorAttributes" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/glue/pom.xml b/services/glue/pom.xml index 0ca6553e825a..f5fa14bcf5bb 100644 --- a/services/glue/pom.xml +++ b/services/glue/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 glue diff --git a/services/glue/src/main/resources/codegen-resources/customization.config b/services/glue/src/main/resources/codegen-resources/customization.config index 4797aaf0e76e..da22aaad9103 100644 --- a/services/glue/src/main/resources/codegen-resources/customization.config +++ b/services/glue/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "*" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/glue/src/main/resources/codegen-resources/service-2.json b/services/glue/src/main/resources/codegen-resources/service-2.json index 20317ccf797e..940ea4c71403 100644 --- a/services/glue/src/main/resources/codegen-resources/service-2.json +++ b/services/glue/src/main/resources/codegen-resources/service-2.json @@ -3609,7 +3609,7 @@ {"shape":"InternalServiceException"}, {"shape":"ConcurrentRunsExceededException"} ], - "documentation":"

        Starts the active learning workflow for your machine learning transform to improve the transform's quality by generating label sets and adding labels.

        When the StartMLLabelingSetGenerationTaskRun finishes, Glue will have generated a \"labeling set\" or a set of questions for humans to answer.

        In the case of the FindMatches transform, these questions are of the form, “What is the correct way to group these rows together into groups composed entirely of matching records?”

        After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.

        " + "documentation":"

        Starts the active learning workflow for your machine learning transform to improve the transform's quality by generating label sets and adding labels.

        When the StartMLLabelingSetGenerationTaskRun finishes, Glue will have generated a \"labeling set\" or a set of questions for humans to answer.

        In the case of the FindMatches transform, these questions are of the form, \"What is the correct way to group these rows together into groups composed entirely of matching records?\"

        After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.

        Note: The role used to write the generated labeling set to the OutputS3Path is the role associated with the Machine Learning Transform, specified in the CreateMLTransform API.

        " }, "StartTrigger":{ "name":"StartTrigger", @@ -6589,6 +6589,10 @@ "shape":"S3CsvSource", "documentation":"

        Specifies a comma-separated value (CSV) data store stored in Amazon S3.

        " }, + "S3ExcelSource":{ + "shape":"S3ExcelSource", + "documentation":"

        Defines configuration parameters for reading Excel files from Amazon S3.

        " + }, "S3JsonSource":{ "shape":"S3JsonSource", "documentation":"

        Specifies a JSON data store stored in Amazon S3.

        " @@ -6629,10 +6633,18 @@ "shape":"S3GlueParquetTarget", "documentation":"

        Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

        " }, + "S3HyperDirectTarget":{ + "shape":"S3HyperDirectTarget", + "documentation":"

        Defines configuration parameters for writing data to Amazon S3 using HyperDirect optimization.

        " + }, "S3DirectTarget":{ "shape":"S3DirectTarget", "documentation":"

        Specifies a data target that writes to Amazon S3.

        " }, + "S3IcebergDirectTarget":{ + "shape":"S3IcebergDirectTarget", + "documentation":"

        Defines configuration parameters for writing data to Amazon S3 as an Apache Iceberg table.

        " + }, "ApplyMapping":{ "shape":"ApplyMapping", "documentation":"

        Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.

        " @@ -7337,6 +7349,16 @@ "min":1, "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" }, + "CompactionConfiguration":{ + "type":"structure", + "members":{ + "icebergConfiguration":{ + "shape":"IcebergCompactionConfiguration", + "documentation":"

        The configuration for an Iceberg compaction optimizer.

        " + } + }, + "documentation":"

        The configuration for a compaction optimizer. This configuration defines how data files in your table will be compacted to improve query performance and reduce storage costs.

        " + }, "CompactionMetrics":{ "type":"structure", "members":{ @@ -7347,6 +7369,14 @@ }, "documentation":"

        A structure that contains compaction metrics for the optimizer run.

        " }, + "CompactionStrategy":{ + "type":"string", + "enum":[ + "binpack", + "sort", + "z-order" + ] + }, "Comparator":{ "type":"string", "enum":[ @@ -7912,13 +7942,33 @@ "shape":"ConnectionType", "documentation":"

        The name of the connection type.

        " }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

        The human-readable name for the connection type that is displayed in the Glue console.

        " + }, + "Vendor":{ + "shape":"Vendor", + "documentation":"

        The name of the vendor or provider that created or maintains this connection type.

        " + }, "Description":{ "shape":"Description", "documentation":"

        A description of the connection type.

        " }, + "Categories":{ + "shape":"ListOfString", + "documentation":"

        A list of categories that this connection type belongs to. Categories help users filter and find appropriate connection types based on their use cases.

        " + }, "Capabilities":{ "shape":"Capabilities", "documentation":"

        The supported authentication types, data interface types (compute environments), and data operations of the connector.

        " + }, + "LogoUrl":{ + "shape":"UrlString", + "documentation":"

        The URL of the logo associated with a connection type.

        " + }, + "ConnectionTypeVariants":{ + "shape":"ConnectionTypeVariantList", + "documentation":"

        A list of variants available for this connection type. Different variants may provide specialized configurations for specific use cases or implementations of the same general connection type.

        " } }, "documentation":"

        Brief information about a supported connection type returned by the ListConnectionTypes API.

        " @@ -7927,6 +7977,32 @@ "type":"list", "member":{"shape":"ConnectionTypeBrief"} }, + "ConnectionTypeVariant":{ + "type":"structure", + "members":{ + "ConnectionTypeVariantName":{ + "shape":"DisplayName", + "documentation":"

        The unique identifier for the connection type variant. This name is used internally to identify the specific variant of a connection type.

        " + }, + "DisplayName":{ + "shape":"DisplayName", + "documentation":"

        The human-readable name for the connection type variant that is displayed in the Glue console.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        A detailed description of the connection type variant, including its purpose, use cases, and any specific configuration requirements.

        " + }, + "LogoUrl":{ + "shape":"UrlString", + "documentation":"

        The URL of the logo associated with a connection type variant.

        " + } + }, + "documentation":"

        Represents a variant of a connection type in Glue Data Catalog. Connection type variants provide specific configurations and behaviors for different implementations of the same general connection type.

        " + }, + "ConnectionTypeVariantList":{ + "type":"list", + "member":{"shape":"ConnectionTypeVariant"} + }, "ConnectionsList":{ "type":"structure", "members":{ @@ -8730,7 +8806,8 @@ "shape":"HashString", "documentation":"

        Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource.

        " } - } + }, + "documentation":"

        A request to create a data quality ruleset.

        " }, "CreateDataQualityRulesetResponse":{ "type":"structure", @@ -8940,6 +9017,36 @@ }, "documentation":"

        Specifies a grok classifier for CreateClassifier to create.

        " }, + "CreateIcebergTableInput":{ + "type":"structure", + "required":[ + "Location", + "Schema" + ], + "members":{ + "Location":{ + "shape":"LocationString", + "documentation":"

        The S3 location where the Iceberg table data will be stored.

        " + }, + "Schema":{ + "shape":"IcebergSchema", + "documentation":"

        The schema definition that specifies the structure, field types, and metadata for the Iceberg table.

        " + }, + "PartitionSpec":{ + "shape":"IcebergPartitionSpec", + "documentation":"

        The partitioning specification that defines how the Iceberg table data will be organized and partitioned for optimal query performance.

        " + }, + "WriteOrder":{ + "shape":"IcebergSortOrder", + "documentation":"

        The sort order specification that defines how data should be ordered within each partition to optimize query performance.

        " + }, + "Properties":{ + "shape":"StringToStringMap", + "documentation":"

        Key-value pairs of additional table properties and configuration settings for the Iceberg table.

        " + } + }, + "documentation":"

        The configuration parameters required to create a new Iceberg table in the Glue Data Catalog, including table properties and metadata specifications.

        " + }, "CreateIntegrationRequest":{ "type":"structure", "required":[ @@ -9096,7 +9203,7 @@ "members":{ "ResourceArn":{ "shape":"String128", - "documentation":"

        The connection ARN of the source, or the database ARN of the target.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the target table for which to create integration table properties. Currently, this API only supports creating integration table properties for target tables, and the provided ARN should be the ARN of the target table in the Glue Data Catalog. Support for creating integration table properties for source connections (using the connection ARN) is not yet implemented and will be added in a future release.

        " }, "TableName":{ "shape":"String128", @@ -9715,6 +9822,10 @@ "shape":"NameString", "documentation":"

        The catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.

        " }, + "Name":{ + "shape":"NameString", + "documentation":"

        The unique identifier for the table within the specified database that will be created in the Glue Data Catalog.

        " + }, "TableInput":{ "shape":"TableInput", "documentation":"

        The TableInput object that defines the metadata table to create in the catalog.

        " @@ -10285,6 +10396,36 @@ "type":"list", "member":{"shape":"DataOperation"} }, + "DataQualityAggregatedMetrics":{ + "type":"structure", + "members":{ + "TotalRowsProcessed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of rows that were processed during the data quality evaluation.

        " + }, + "TotalRowsPassed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of rows that passed all applicable data quality rules.

        " + }, + "TotalRowsFailed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of rows that failed one or more data quality rules.

        " + }, + "TotalRulesProcessed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of data quality rules that were evaluated.

        " + }, + "TotalRulesPassed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of data quality rules that passed their evaluation criteria.

        " + }, + "TotalRulesFailed":{ + "shape":"NullableDouble", + "documentation":"

        The total number of data quality rules that failed their evaluation criteria.

        " + } + }, + "documentation":"

        A summary of metrics showing the total counts of processed rows and rules, including their pass/fail statistics based on row-level results.

        " + }, "DataQualityAnalyzerResult":{ "type":"structure", "members":{ @@ -10467,6 +10608,10 @@ "Observations":{ "shape":"DataQualityObservations", "documentation":"

        A list of DataQualityObservation objects representing the observations generated after evaluating the rules and analyzers.

        " + }, + "AggregatedMetrics":{ + "shape":"DataQualityAggregatedMetrics", + "documentation":"

        A summary of DataQualityAggregatedMetrics objects showing the total counts of processed rows and rules, including their pass/fail statistics based on row-level results.

        " } }, "documentation":"

        Describes a data quality result.

        " @@ -10614,6 +10759,10 @@ "EvaluatedRule":{ "shape":"DataQualityRuleResultDescription", "documentation":"

        The evaluated rule.

        " + }, + "RuleMetrics":{ + "shape":"RuleMetricsMap", + "documentation":"

        A map containing metrics associated with the evaluation of the rule based on row-level results.

        " } }, "documentation":"

        Describes the result of the evaluation of a data quality rule.

        " @@ -12229,6 +12378,11 @@ }, "documentation":"

        A policy that specifies update behavior for the crawler.

        " }, + "DisplayName":{ + "type":"string", + "max":128, + "min":1 + }, "Double":{"type":"double"}, "DoubleColumnStatisticsData":{ "type":"structure", @@ -12784,6 +12938,10 @@ "ConnectionName":{ "shape":"NameString", "documentation":"

        The name of the connection to an external data source, for example a Redshift-federated catalog.

        " + }, + "ConnectionType":{ + "shape":"NameString", + "documentation":"

        The type of connection used to access the federated catalog, specifying the protocol or method for connection to the external data source.

        " } }, "documentation":"

        A catalog that points to an entity outside the Glue Data Catalog.

        " @@ -12798,6 +12956,10 @@ "ConnectionName":{ "shape":"NameString", "documentation":"

        The name of the connection to the external metastore.

        " + }, + "ConnectionType":{ + "shape":"NameString", + "documentation":"

        The type of connection used to access the federated database, such as JDBC, ODBC, or other supported connection protocols.

        " } }, "documentation":"

        A database that points to an entity outside the Glue Data Catalog.

        " @@ -12831,6 +12993,10 @@ "ConnectionName":{ "shape":"NameString", "documentation":"

        The name of the connection to the external metastore.

        " + }, + "ConnectionType":{ + "shape":"NameString", + "documentation":"

        The type of connection used to access the federated table, specifying the protocol or method for connecting to the external data source.

        " } }, "documentation":"

        A table that points to an entity outside the Glue Data Catalog.

        " @@ -13980,8 +14146,13 @@ "Observations":{ "shape":"DataQualityObservations", "documentation":"

        A list of DataQualityObservation objects representing the observations generated after evaluating the rules and analyzers.

        " + }, + "AggregatedMetrics":{ + "shape":"DataQualityAggregatedMetrics", + "documentation":"

        A summary of DataQualityAggregatedMetrics objects showing the total counts of processed rows and rules, including their pass/fail statistics based on row-level results.

        " } - } + }, + "documentation":"

        The response for the data quality result.

        " }, "GetDataQualityRuleRecommendationRunRequest":{ "type":"structure", @@ -14052,7 +14223,8 @@ "shape":"NameString", "documentation":"

        The name of the security configuration created with the data quality encryption option.

        " } - } + }, + "documentation":"

        The response for the Data Quality rule recommendation run.

        " }, "GetDataQualityRulesetEvaluationRunRequest":{ "type":"structure", @@ -14174,7 +14346,8 @@ "shape":"NameString", "documentation":"

        The name of the security configuration created with the data quality encryption option.

        " } - } + }, + "documentation":"

        Returns the data quality ruleset response.

        " }, "GetDatabaseRequest":{ "type":"structure", @@ -14403,7 +14576,7 @@ "members":{ "ResourceArn":{ "shape":"String128", - "documentation":"

        The connection ARN of the source, or the database ARN of the target.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the target table for which to retrieve integration table properties. Currently, this API only supports retrieving properties for target tables, and the provided ARN should be the ARN of the target table in the Glue Data Catalog. Support for retrieving integration table properties for source connections (using the connection ARN) is not yet implemented and will be added in a future release.

        " }, "TableName":{ "shape":"String128", @@ -14416,7 +14589,7 @@ "members":{ "ResourceArn":{ "shape":"String128", - "documentation":"

        The connection ARN of the source, or the database ARN of the target.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the target table for which to retrieve integration table properties. Currently, this API only supports retrieving properties for target tables, and the provided ARN should be the ARN of the target table in the Glue Data Catalog. Support for retrieving integration table properties for source connections (using the connection ARN) is not yet implemented and will be added in a future release.

        " }, "TableName":{ "shape":"String128", @@ -16428,10 +16601,24 @@ "type":"list", "member":{"shape":"HudiTarget"} }, + "HyperTargetCompressionType":{ + "type":"string", + "enum":["uncompressed"] + }, "IAMRoleArn":{ "type":"string", "pattern":"^arn:aws(-(cn|us-gov|iso(-[bef])?))?:iam::[0-9]{12}:role/.+" }, + "IcebergCompactionConfiguration":{ + "type":"structure", + "members":{ + "strategy":{ + "shape":"CompactionStrategy", + "documentation":"

        The strategy to use for compaction. Valid values are:

        • binpack: Combines small files into larger files, typically targeting sizes over 100MB, while applying any pending deletes. This is the recommended compaction strategy for most use cases.

        • sort: Organizes data based on specified columns which are sorted hierarchically during compaction, improving query performance for filtered operations. This strategy is recommended when your queries frequently filter on specific columns. To use this strategy, you must first define a sort order in your Iceberg table properties using the sort_order table property.

        • z-order: Optimizes data organization by blending multiple attributes into a single scalar value that can be used for sorting, allowing efficient querying across multiple dimensions. This strategy is recommended when you need to query data across multiple dimensions simultaneously. To use this strategy, you must first define a sort order in your Iceberg table properties using the sort_order table property.

        If an input is not provided, the default value 'binpack' will be used.

        " + } + }, + "documentation":"

        The configuration for an Iceberg compaction optimizer. This configuration defines parameters for optimizing the layout of data files in Iceberg tables.

        " + }, "IcebergCompactionMetrics":{ "type":"structure", "members":{ @@ -16458,6 +16645,11 @@ }, "documentation":"

        Compaction metrics for Iceberg for the optimizer run.

        " }, + "IcebergDocument":{ + "type":"structure", + "members":{}, + "document":true + }, "IcebergInput":{ "type":"structure", "required":["MetadataOperation"], @@ -16469,10 +16661,21 @@ "Version":{ "shape":"VersionString", "documentation":"

        The table version for the Iceberg table. Defaults to 2.

        " + }, + "CreateIcebergTableInput":{ + "shape":"CreateIcebergTableInput", + "documentation":"

        The configuration parameters required to create a new Iceberg table in the Glue Data Catalog, including table properties and metadata specifications.

        " } }, "documentation":"

        A structure that defines an Apache Iceberg metadata table to create in the catalog.

        " }, + "IcebergNullOrder":{ + "type":"string", + "enum":[ + "nulls-first", + "nulls-last" + ] + }, "IcebergOrphanFileDeletionConfiguration":{ "type":"structure", "members":{ @@ -16509,6 +16712,52 @@ }, "documentation":"

        Orphan file deletion metrics for Iceberg for the optimizer run.

        " }, + "IcebergPartitionField":{ + "type":"structure", + "required":[ + "SourceId", + "Transform", + "Name" + ], + "members":{ + "SourceId":{ + "shape":"Integer", + "documentation":"

        The identifier of the source field from the table schema that this partition field is based on.

        " + }, + "Transform":{ + "shape":"IcebergTransformString", + "documentation":"

        The transformation function applied to the source field to create the partition, such as identity, bucket, truncate, year, month, day, or hour.

        " + }, + "Name":{ + "shape":"ColumnNameString", + "documentation":"

        The name of the partition field as it will appear in the partitioned table structure.

        " + }, + "FieldId":{ + "shape":"Integer", + "documentation":"

        The unique identifier assigned to this partition field within the Iceberg table's partition specification.

        " + } + }, + "documentation":"

        Defines a single partition field within an Iceberg partition specification, including the source field, transformation function, partition name, and unique identifier.

        " + }, + "IcebergPartitionSpec":{ + "type":"structure", + "required":["Fields"], + "members":{ + "Fields":{ + "shape":"IcebergPartitionSpecFieldList", + "documentation":"

        The list of partition fields that define how the table data should be partitioned, including source fields and their transformations.

        " + }, + "SpecId":{ + "shape":"Integer", + "documentation":"

        The unique identifier for this partition specification within the Iceberg table's metadata history.

        " + } + }, + "documentation":"

        Defines the partitioning specification for an Iceberg table, determining how table data will be organized and partitioned for optimal query performance.

        " + }, + "IcebergPartitionSpecFieldList":{ + "type":"list", + "member":{"shape":"IcebergPartitionField"} + }, "IcebergRetentionConfiguration":{ "type":"structure", "members":{ @@ -16557,6 +16806,160 @@ }, "documentation":"

        Snapshot retention metrics for Iceberg for the optimizer run.

        " }, + "IcebergSchema":{ + "type":"structure", + "required":["Fields"], + "members":{ + "SchemaId":{ + "shape":"Integer", + "documentation":"

        The unique identifier for this schema version within the Iceberg table's schema evolution history.

        " + }, + "IdentifierFieldIds":{ + "shape":"IntegerList", + "documentation":"

        The list of field identifiers that uniquely identify records in the table, used for row-level operations and deduplication.

        " + }, + "Type":{ + "shape":"IcebergStructTypeEnum", + "documentation":"

        The root type of the schema structure, typically \"struct\" for Iceberg table schemas.

        " + }, + "Fields":{ + "shape":"IcebergStructFieldList", + "documentation":"

        The list of field definitions that make up the table schema, including field names, types, and metadata.

        " + } + }, + "documentation":"

        Defines the schema structure for an Iceberg table, including field definitions, data types, and schema metadata.

        " + }, + "IcebergSortDirection":{ + "type":"string", + "enum":[ + "asc", + "desc" + ] + }, + "IcebergSortField":{ + "type":"structure", + "required":[ + "SourceId", + "Transform", + "Direction", + "NullOrder" + ], + "members":{ + "SourceId":{ + "shape":"Integer", + "documentation":"

        The identifier of the source field from the table schema that this sort field is based on.

        " + }, + "Transform":{ + "shape":"IcebergTransformString", + "documentation":"

        The transformation function applied to the source field before sorting, such as identity, bucket, or truncate.

        " + }, + "Direction":{ + "shape":"IcebergSortDirection", + "documentation":"

        The sort direction for this field, either ascending or descending.

        " + }, + "NullOrder":{ + "shape":"IcebergNullOrder", + "documentation":"

        The ordering behavior for null values in this field, specifying whether nulls should appear first or last in the sort order.

        " + } + }, + "documentation":"

        Defines a single field within an Iceberg sort order specification, including the source field, transformation, sort direction, and null value ordering.

        " + }, + "IcebergSortOrder":{ + "type":"structure", + "required":[ + "OrderId", + "Fields" + ], + "members":{ + "OrderId":{ + "shape":"Integer", + "documentation":"

        The unique identifier for this sort order specification within the Iceberg table's metadata.

        " + }, + "Fields":{ + "shape":"IcebergSortOrderFieldList", + "documentation":"

        The list of fields and their sort directions that define the ordering criteria for the Iceberg table data.

        " + } + }, + "documentation":"

        Defines the sort order specification for an Iceberg table, determining how data should be ordered within partitions to optimize query performance.

        " + }, + "IcebergSortOrderFieldList":{ + "type":"list", + "member":{"shape":"IcebergSortField"} + }, + "IcebergStructField":{ + "type":"structure", + "required":[ + "Id", + "Name", + "Type", + "Required" + ], + "members":{ + "Id":{ + "shape":"Integer", + "documentation":"

        The unique identifier assigned to this field within the Iceberg table schema, used for schema evolution and field tracking.

        " + }, + "Name":{ + "shape":"ColumnNameString", + "documentation":"

        The name of the field as it appears in the table schema and query operations.

        " + }, + "Type":{ + "shape":"IcebergDocument", + "documentation":"

        The data type definition for this field, specifying the structure and format of the data it contains.

        " + }, + "Required":{ + "shape":"Boolean", + "documentation":"

        Indicates whether this field is required (non-nullable) or optional (nullable) in the table schema.

        " + }, + "Doc":{ + "shape":"CommentString", + "documentation":"

        Optional documentation or description text that provides additional context about the purpose and usage of this field.

        " + } + }, + "documentation":"

        Defines a single field within an Iceberg table schema, including its identifier, name, data type, nullability, and documentation.

        " + }, + "IcebergStructFieldList":{ + "type":"list", + "member":{"shape":"IcebergStructField"} + }, + "IcebergStructTypeEnum":{ + "type":"string", + "enum":["struct"] + }, + "IcebergTableUpdate":{ + "type":"structure", + "required":[ + "Schema", + "Location" + ], + "members":{ + "Schema":{ + "shape":"IcebergSchema", + "documentation":"

        The updated schema definition for the Iceberg table, specifying any changes to field structure, data types, or schema metadata.

        " + }, + "PartitionSpec":{ + "shape":"IcebergPartitionSpec", + "documentation":"

        The updated partitioning specification that defines how the table data should be reorganized and partitioned.

        " + }, + "SortOrder":{ + "shape":"IcebergSortOrder", + "documentation":"

        The updated sort order specification that defines how data should be ordered within partitions for optimal query performance.

        " + }, + "Location":{ + "shape":"LocationString", + "documentation":"

        The updated S3 location where the Iceberg table data will be stored.

        " + }, + "Properties":{ + "shape":"StringToStringMap", + "documentation":"

        Updated key-value pairs of table properties and configuration settings for the Iceberg table.

        " + } + }, + "documentation":"

        Defines a complete set of updates to be applied to an Iceberg table, including schema changes, partitioning modifications, sort order adjustments, location updates, and property changes.

        " + }, + "IcebergTableUpdateList":{ + "type":"list", + "member":{"shape":"IcebergTableUpdate"} + }, "IcebergTarget":{ "type":"structure", "members":{ @@ -16579,10 +16982,20 @@ }, "documentation":"

        Specifies an Apache Iceberg data source where Iceberg tables are stored in Amazon S3.

        " }, + "IcebergTargetCompressionType":{ + "type":"string", + "enum":[ + "gzip", + "lzo", + "uncompressed", + "snappy" + ] + }, "IcebergTargetList":{ "type":"list", "member":{"shape":"IcebergTarget"} }, + "IcebergTransformString":{"type":"string"}, "IdString":{ "type":"string", "max":255, @@ -16726,6 +17139,10 @@ "max":1, "min":0 }, + "IntegerList":{ + "type":"list", + "member":{"shape":"Integer"} + }, "IntegerValue":{"type":"integer"}, "Integration":{ "type":"structure", @@ -16804,6 +17221,10 @@ "RefreshInterval":{ "shape":"String128", "documentation":"

        Specifies the frequency at which CDC (Change Data Capture) pulls or incremental loads should occur. This parameter provides flexibility to align the refresh rate with your specific data update patterns, system load considerations, and performance optimization goals. Time increment can be set from 15 minutes to 8640 minutes (six days). Currently supports creation of RefreshInterval only.

        " + }, + "SourceProperties":{ + "shape":"IntegrationSourcePropertiesMap", + "documentation":"

        A collection of key-value pairs that specify additional properties for the integration source. These properties provide configuration options that can be used to customize the behavior of the ODB source during data integration operations.

        " } }, "documentation":"

        Properties associated with the integration.

        " @@ -16887,6 +17308,10 @@ "FunctionSpec":{ "shape":"String128", "documentation":"

        Specifies the function used to partition data on the target. The only accepted value for this parameter is `'identity'` (string). The `'identity'` function ensures that the data partitioning on the target follows the same scheme as the source. In other words, the partitioning structure of the source data is preserved in the target destination.

        " + }, + "ConversionSpec":{ + "shape":"String128", + "documentation":"

        Specifies the timestamp format of the source data. Valid values are:

        • epoch_sec - Unix epoch timestamp in seconds

        • epoch_milli - Unix epoch timestamp in milliseconds

        • iso - ISO 8601 formatted timestamp

        Only specify ConversionSpec when using timestamp-based partition functions (year, month, day, or hour). Glue Zero-ETL uses this parameter to correctly transform source data into timestamp format before partitioning.

        Do not use high-cardinality columns with the identity partition function. High-cardinality columns include:

        • Primary keys

        • Timestamp fields (such as LastModifiedTimestamp, CreatedDate)

        • System-generated timestamps

        Using high-cardinality columns with identity partitioning creates many small partitions, which can significantly degrade ingestion performance.

        " } }, "documentation":"

        A structure that describes how data is partitioned on the target.

        " @@ -16906,6 +17331,11 @@ "documentation":"

        The data processed through your integration exceeded your quota.

        ", "exception":true }, + "IntegrationSourcePropertiesMap":{ + "type":"map", + "key":{"shape":"IntegrationString"}, + "value":{"shape":"IntegrationString"} + }, "IntegrationStatus":{ "type":"string", "enum":[ @@ -19755,6 +20185,7 @@ "type":"string", "box":true }, + "NumberTargetPartitionsString":{"type":"string"}, "OAuth2ClientApplication":{ "type":"structure", "members":{ @@ -20199,6 +20630,8 @@ "snappy", "lzo", "gzip", + "brotli", + "lz4", "uncompressed", "none" ] @@ -21549,6 +21982,12 @@ }, "RoleString":{"type":"string"}, "RowTag":{"type":"string"}, + "RuleMetricsMap":{ + "type":"map", + "key":{"shape":"NameString"}, + "value":{"shape":"NullableDouble"}, + "sensitive":true + }, "RulesetNames":{ "type":"list", "member":{"shape":"NameString"}, @@ -21914,6 +22353,10 @@ "shape":"DeltaTargetCompressionType", "documentation":"

        Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

        " }, + "NumberTargetPartitions":{ + "shape":"NumberTargetPartitionsString", + "documentation":"

        Specifies the number of target partitions for distributing Delta Lake dataset files across Amazon S3.

        " + }, "Format":{ "shape":"TargetFormat", "documentation":"

        Specifies the data output format for the target.

        " @@ -22010,6 +22453,10 @@ "shape":"EnclosedInStringProperty", "documentation":"

        Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

        " }, + "NumberTargetPartitions":{ + "shape":"NumberTargetPartitionsString", + "documentation":"

        Specifies the number of target partitions when writing data directly to Amazon S3.

        " + }, "Format":{ "shape":"TargetFormat", "documentation":"

        Specifies the data output format for the target.

        " @@ -22047,6 +22494,68 @@ "SSE-S3" ] }, + "S3ExcelSource":{ + "type":"structure", + "required":[ + "Name", + "Paths" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

        The name of the S3 Excel data source.

        " + }, + "Paths":{ + "shape":"EnclosedInStringProperties", + "documentation":"

        The S3 paths where the Excel files are located.

        " + }, + "CompressionType":{ + "shape":"ParquetCompressionType", + "documentation":"

        The compression format used for the Excel files.

        " + }, + "Exclusions":{ + "shape":"EnclosedInStringProperties", + "documentation":"

        Patterns to exclude specific files or paths from processing.

        " + }, + "GroupSize":{ + "shape":"EnclosedInStringProperty", + "documentation":"

        Defines the size of file groups for batch processing.

        " + }, + "GroupFiles":{ + "shape":"EnclosedInStringProperty", + "documentation":"

        Specifies how files should be grouped for processing.

        " + }, + "Recurse":{ + "shape":"BoxedBoolean", + "documentation":"

        Indicates whether to recursively process subdirectories.

        " + }, + "MaxBand":{ + "shape":"BoxedNonNegativeInt", + "documentation":"

        The maximum number of processing bands to use.

        " + }, + "MaxFilesInBand":{ + "shape":"BoxedNonNegativeInt", + "documentation":"

        The maximum number of files to process in each band.

        " + }, + "AdditionalOptions":{ + "shape":"S3DirectSourceAdditionalOptions", + "documentation":"

        Additional configuration options for S3 direct source processing.

        " + }, + "NumberRows":{ + "shape":"BoxedLong", + "documentation":"

        The number of rows to process from each Excel file.

        " + }, + "SkipFooter":{ + "shape":"BoxedNonNegativeInt", + "documentation":"

        The number of rows to skip at the end of each Excel file.

        " + }, + "OutputSchemas":{ + "shape":"GlueSchemas", + "documentation":"

        The AWS Glue schemas to apply to the processed data.

        " + } + }, + "documentation":"

        Specifies an S3 Excel data source.

        " + }, "S3GlueParquetTarget":{ "type":"structure", "required":[ @@ -22075,6 +22584,10 @@ "shape":"ParquetCompressionType", "documentation":"

        Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

        " }, + "NumberTargetPartitions":{ + "shape":"NumberTargetPartitionsString", + "documentation":"

        Specifies the number of target partitions for Parquet files when writing to Amazon S3 using AWS Glue.

        " + }, "SchemaChangePolicy":{ "shape":"DirectSchemaChangePolicy", "documentation":"

        A policy that specifies update behavior for the crawler.

        " @@ -22150,6 +22663,10 @@ "shape":"HudiTargetCompressionType", "documentation":"

        Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are \"gzip\" and \"bzip\".

        " }, + "NumberTargetPartitions":{ + "shape":"NumberTargetPartitionsString", + "documentation":"

        Specifies the number of target partitions for distributing Hudi dataset files across Amazon S3.

        " + }, "PartitionKeys":{ "shape":"GlueStudioPathList", "documentation":"

        Specifies native partitioning using a sequence of keys.

        " @@ -22199,6 +22716,90 @@ }, "documentation":"

        Specifies a Hudi data source stored in Amazon S3.

        " }, + "S3HyperDirectTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

        The unique identifier for the HyperDirect target node.

        " + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"

        Specifies the input source for the HyperDirect target.

        " + }, + "PartitionKeys":{ + "shape":"GlueStudioPathList", + "documentation":"

        Defines the partitioning strategy for the output data.

        " + }, + "Path":{ + "shape":"EnclosedInStringProperty", + "documentation":"

        The S3 location where the output data will be written.

        " + }, + "Compression":{ + "shape":"HyperTargetCompressionType", + "documentation":"

        The compression type to apply to the output data.

        " + }, + "SchemaChangePolicy":{ + "shape":"DirectSchemaChangePolicy", + "documentation":"

        Defines how schema changes are handled during write operations.

        " + } + }, + "documentation":"

        Specifies a HyperDirect data target that writes to Amazon S3.

        " + }, + "S3IcebergDirectTarget":{ + "type":"structure", + "required":[ + "Name", + "Inputs", + "Path", + "Format", + "Compression" + ], + "members":{ + "Name":{ + "shape":"NodeName", + "documentation":"

        Specifies the unique identifier for the Iceberg target node in your data pipeline.

        " + }, + "Inputs":{ + "shape":"OneInput", + "documentation":"

        Defines the single input source that provides data to this Iceberg target.

        " + }, + "PartitionKeys":{ + "shape":"GlueStudioPathList", + "documentation":"

        Specifies the columns used to partition the Iceberg table data in S3.

        " + }, + "Path":{ + "shape":"EnclosedInStringProperty", + "documentation":"

        Defines the S3 location where the Iceberg table data will be stored.

        " + }, + "Format":{ + "shape":"TargetFormat", + "documentation":"

        Specifies the file format used for storing Iceberg table data (e.g., Parquet, ORC).

        " + }, + "AdditionalOptions":{ + "shape":"AdditionalOptions", + "documentation":"

        Provides additional configuration options for customizing the Iceberg table behavior.

        " + }, + "SchemaChangePolicy":{ + "shape":"DirectSchemaChangePolicy", + "documentation":"

        Defines how schema changes are handled when writing data to the Iceberg table.

        " + }, + "Compression":{ + "shape":"IcebergTargetCompressionType", + "documentation":"

        Specifies the compression codec used for Iceberg table files in S3.

        " + }, + "NumberTargetPartitions":{ + "shape":"NumberTargetPartitionsString", + "documentation":"

        Sets the number of target partitions for distributing Iceberg table files across S3.

        " + } + }, + "documentation":"

        Specifies a target that writes to an Iceberg data source in Amazon S3.

        " + }, "S3JsonSource":{ "type":"structure", "required":[ @@ -23593,7 +24194,8 @@ "shape":"HashString", "documentation":"

        Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource.

        " } - } + }, + "documentation":"

        The request object for starting a Data Quality rule recommendation run.

        " }, "StartDataQualityRuleRecommendationRunResponse":{ "type":"structure", @@ -24372,6 +24974,11 @@ "type":"list", "member":{"shape":"GenericString"} }, + "StringToStringMap":{ + "type":"map", + "key":{"shape":"NullableString"}, + "value":{"shape":"NullableString"} + }, "SupportedDialect":{ "type":"structure", "members":{ @@ -24638,6 +25245,10 @@ "shape":"TableOptimizerVpcConfiguration", "documentation":"

        A TableOptimizerVpcConfiguration object representing the VPC configuration for a table optimizer.

        This configuration is necessary to perform optimization on tables that are in a customer VPC.

        " }, + "compactionConfiguration":{ + "shape":"CompactionConfiguration", + "documentation":"

        The configuration for a compaction optimizer. This configuration defines how data files in your table will be compacted to improve query performance and reduce storage costs.

        " + }, "retentionConfiguration":{ "shape":"RetentionConfiguration", "documentation":"

        The configuration for a snapshot retention optimizer.

        " @@ -24687,6 +25298,10 @@ "shape":"CompactionMetrics", "documentation":"

        A CompactionMetrics object containing metrics for the optimizer run.

        " }, + "compactionStrategy":{ + "shape":"CompactionStrategy", + "documentation":"

        The strategy used for the compaction run. Indicates which algorithm was applied to determine how files were selected and combined during the compaction process. Valid values are:

        • binpack: Combines small files into larger files, typically targeting sizes over 100MB, while applying any pending deletes. This is the recommended compaction strategy for most use cases.

        • sort: Organizes data based on specified columns which are sorted hierarchically during compaction, improving query performance for filtered operations. This strategy is recommended when your queries frequently filter on specific columns. To use this strategy, you must first define a sort order in your Iceberg table properties using the sort_order table property.

        • z-order: Optimizes data organization by blending multiple attributes into a single scalar value that can be used for sorting, allowing efficient querying across multiple dimensions. This strategy is recommended when you need to query data across multiple dimensions simultaneously. To use this strategy, you must first define a sort order in your Iceberg table properties using the sort_order table property.

        " + }, "retentionMetrics":{ "shape":"RetentionMetrics", "documentation":"

        A RetentionMetrics object containing metrics for the optimizer run.

        " @@ -24877,7 +25492,10 @@ "orc", "parquet", "hudi", - "delta" + "delta", + "iceberg", + "hyper", + "xml" ] }, "TargetProcessingProperties":{ @@ -26093,6 +26711,28 @@ }, "documentation":"

        Specifies a grok classifier to update when passed to UpdateClassifier.

        " }, + "UpdateIcebergInput":{ + "type":"structure", + "required":["UpdateIcebergTableInput"], + "members":{ + "UpdateIcebergTableInput":{ + "shape":"UpdateIcebergTableInput", + "documentation":"

        The specific update operations to be applied to the Iceberg table, containing a list of updates that define the new state of the table including schema, partitions, and properties.

        " + } + }, + "documentation":"

        Input parameters specific to updating Apache Iceberg tables in Glue Data Catalog, containing the update operations to be applied to an existing Iceberg table.

        " + }, + "UpdateIcebergTableInput":{ + "type":"structure", + "required":["Updates"], + "members":{ + "Updates":{ + "shape":"IcebergTableUpdateList", + "documentation":"

        The list of table update operations that specify the changes to be made to the Iceberg table, including schema modifications, partition specifications, and table properties.

        " + } + }, + "documentation":"

        Contains the update operations to be applied to an existing Iceberg table in AWS Glue Data Catalog, defining the new state of the table metadata.

        " + }, "UpdateIntegrationResourcePropertyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -26307,6 +26947,16 @@ } } }, + "UpdateOpenTableFormatInput":{ + "type":"structure", + "members":{ + "UpdateIcebergInput":{ + "shape":"UpdateIcebergInput", + "documentation":"

        Apache Iceberg-specific update parameters that define the table modifications to be applied, including schema changes, partition specifications, and table properties.

        " + } + }, + "documentation":"

        Input parameters for updating open table format tables in Glue Data Catalog, serving as a wrapper for format-specific update operations such as Apache Iceberg.

        " + }, "UpdatePartitionRequest":{ "type":"structure", "required":[ @@ -26499,10 +27149,7 @@ }, "UpdateTableRequest":{ "type":"structure", - "required":[ - "DatabaseName", - "TableInput" - ], + "required":["DatabaseName"], "members":{ "CatalogId":{ "shape":"CatalogIdString", @@ -26512,6 +27159,10 @@ "shape":"NameString", "documentation":"

        The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.

        " }, + "Name":{ + "shape":"NameString", + "documentation":"

        The unique identifier for the table within the specified database that will be updated in the Glue Data Catalog.

        " + }, "TableInput":{ "shape":"TableInput", "documentation":"

        An updated TableInput object to define the metadata table in the catalog.

        " @@ -26535,7 +27186,8 @@ "Force":{ "shape":"Boolean", "documentation":"

        A flag that can be set to true to ignore matching storage descriptor and subobject matching requirements.

        " - } + }, + "UpdateOpenTableFormatInput":{"shape":"UpdateOpenTableFormatInput"} } }, "UpdateTableResponse":{ @@ -26698,6 +27350,7 @@ "documentation":"

        The options to configure an upsert operation when writing to a Redshift target.

        " }, "UriString":{"type":"string"}, + "UrlString":{"type":"string"}, "UsageProfileDefinition":{ "type":"structure", "members":{ @@ -26828,6 +27481,11 @@ "type":"list", "member":{"shape":"ValueString"} }, + "Vendor":{ + "type":"string", + "max":128, + "min":1 + }, "VersionId":{"type":"long"}, "VersionLongNumber":{ "type":"long", diff --git a/services/grafana/pom.xml b/services/grafana/pom.xml index 6cd5b525df22..a8191bb9a2e7 100644 --- a/services/grafana/pom.xml +++ b/services/grafana/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT grafana AWS Java SDK :: Services :: Grafana diff --git a/services/grafana/src/main/resources/codegen-resources/customization.config b/services/grafana/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/grafana/src/main/resources/codegen-resources/customization.config +++ b/services/grafana/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/greengrass/pom.xml b/services/greengrass/pom.xml index c18df488424b..bbf7c13fdcde 100644 --- a/services/greengrass/pom.xml +++ b/services/greengrass/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT greengrass AWS Java SDK :: Services :: AWS Greengrass diff --git a/services/greengrass/src/main/resources/codegen-resources/customization.config b/services/greengrass/src/main/resources/codegen-resources/customization.config index 974d6cf3de53..96ee6ad2eca2 100644 --- a/services/greengrass/src/main/resources/codegen-resources/customization.config +++ b/services/greengrass/src/main/resources/codegen-resources/customization.config @@ -24,6 +24,5 @@ "createResourceDefinition", "createSoftwareUpdateJob" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": 
true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/greengrassv2/pom.xml b/services/greengrassv2/pom.xml index 8c519efd373b..7e905c132ffd 100644 --- a/services/greengrassv2/pom.xml +++ b/services/greengrassv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT greengrassv2 AWS Java SDK :: Services :: Greengrass V2 diff --git a/services/greengrassv2/src/main/resources/codegen-resources/customization.config b/services/greengrassv2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/greengrassv2/src/main/resources/codegen-resources/customization.config +++ b/services/greengrassv2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/groundstation/pom.xml b/services/groundstation/pom.xml index d3ab7e8c8247..2550894b85ed 100644 --- a/services/groundstation/pom.xml +++ b/services/groundstation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT groundstation AWS Java SDK :: Services :: GroundStation diff --git a/services/groundstation/src/main/resources/codegen-resources/customization.config b/services/groundstation/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/groundstation/src/main/resources/codegen-resources/customization.config +++ b/services/groundstation/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/guardduty/pom.xml b/services/guardduty/pom.xml index 84e72e29df7e..b06ef89e3a18 100644 --- a/services/guardduty/pom.xml +++ b/services/guardduty/pom.xml @@ -20,7 +20,7 @@ services 
software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 guardduty diff --git a/services/guardduty/src/main/resources/codegen-resources/customization.config b/services/guardduty/src/main/resources/codegen-resources/customization.config index fc09122c58c2..eddcd509d138 100644 --- a/services/guardduty/src/main/resources/codegen-resources/customization.config +++ b/services/guardduty/src/main/resources/codegen-resources/customization.config @@ -9,6 +9,5 @@ "declineInvitations", "deleteInvitations" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/guardduty/src/main/resources/codegen-resources/service-2.json b/services/guardduty/src/main/resources/codegen-resources/service-2.json index baa855c15071..19ece8110f6c 100644 --- a/services/guardduty/src/main/resources/codegen-resources/service-2.json +++ b/services/guardduty/src/main/resources/codegen-resources/service-2.json @@ -454,7 +454,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

        Provides the details of the GuardDuty administrator account associated with the current GuardDuty member account.

        If the organization's management account or a delegated administrator runs this API, it will return success (HTTP 200) but no content.

        " + "documentation":"

        Provides the details of the GuardDuty administrator account associated with the current GuardDuty member account.

        Based on the type of account that runs this API, the following list shows how the API behavior varies:

        • When the GuardDuty administrator account runs this API, it will return success (HTTP 200) but no content.

        • When a member account runs this API, it will return the details of the GuardDuty administrator account that is associated with this calling member account.

        • When an individual account (not associated with an organization) runs this API, it will return success (HTTP 200) but no content.

        " }, "GetCoverageStatistics":{ "name":"GetCoverageStatistics", @@ -1318,7 +1318,7 @@ }, "Email":{ "shape":"Email", - "documentation":"

        The email address of the member account.

        ", + "documentation":"

        The email address of the member account.

        The rules for a valid email address:

        • The email address must be a minimum of 6 and a maximum of 64 characters long.

        • All characters must be 7-bit ASCII characters.

        • There must be one and only one @ symbol, which separates the local name from the domain name.

        • The local name can't contain any of the following characters:

          whitespace, \" ' ( ) < > [ ] : ; , \\ | % &

        • The local name can't begin with a dot (.).

        • The domain name can consist of only the characters [a-z], [A-Z], [0-9], hyphen (-), or dot (.).

        • The domain name can't begin or end with a dot (.) or hyphen (-).

        • The domain name must contain at least one dot.

        ", "locationName":"email" } }, @@ -1474,6 +1474,11 @@ "shape":"Session", "documentation":"

        Contains information about the user session where the activity initiated.

        ", "locationName":"session" + }, + "Process":{ + "shape":"ActorProcess", + "documentation":"

        Contains information about the process associated with the threat actor. This includes details such as process name, path, execution time, and unique identifiers that help track the actor's activities within the system.

        ", + "locationName":"process" } }, "documentation":"

        Information about the actors involved in an attack sequence.

        " @@ -1483,11 +1488,40 @@ "member":{"shape":"String"}, "max":400 }, + "ActorProcess":{ + "type":"structure", + "required":[ + "Name", + "Path" + ], + "members":{ + "Name":{ + "shape":"ProcessName", + "documentation":"

        The name of the process as it appears in the system.

        ", + "locationName":"name" + }, + "Path":{ + "shape":"ProcessPath", + "documentation":"

        The full file path to the process executable on the system.

        ", + "locationName":"path" + }, + "Sha256":{ + "shape":"ProcessSha256", + "documentation":"

        The SHA256 hash of the process executable file, which can be used for identification and verification purposes.

        ", + "locationName":"sha256" + } + }, + "documentation":"

        Contains information about a process involved in a GuardDuty finding, including process identification, execution details, and file information.

        " + }, "Actors":{ "type":"list", "member":{"shape":"Actor"}, "max":400 }, + "AdditionalSequenceTypes":{ + "type":"list", + "member":{"shape":"FindingType"} + }, "AddonDetails":{ "type":"structure", "members":{ @@ -1862,6 +1896,17 @@ }, "documentation":"

        Contains information on the status of CloudTrail as a data source for the detector.

        " }, + "ClusterStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "FAILED", + "UPDATING", + "PENDING" + ] + }, "Condition":{ "type":"structure", "members":{ @@ -1993,6 +2038,28 @@ }, "documentation":"

        Details of a container.

        " }, + "ContainerFindingResource":{ + "type":"structure", + "required":["Image"], + "members":{ + "Image":{ + "shape":"String", + "documentation":"

        The container image information, including the image name and tag used to run the container that was involved in the finding.

        ", + "locationName":"image" + }, + "ImageUid":{ + "shape":"ContainerImageUid", + "documentation":"

        The unique ID associated with the container image.

        ", + "locationName":"imageUid" + } + }, + "documentation":"

        Contains information about container resources involved in a GuardDuty finding. This structure provides details about containers that were identified as part of suspicious or malicious activity.

        " + }, + "ContainerImageUid":{ + "type":"string", + "max":1024, + "min":1 + }, "ContainerInstanceDetails":{ "type":"structure", "members":{ @@ -2009,6 +2076,15 @@ }, "documentation":"

        Contains information about the Amazon EC2 instance that is running the Amazon ECS container.

        " }, + "ContainerUid":{ + "type":"string", + "max":256, + "min":0 + }, + "ContainerUids":{ + "type":"list", + "member":{"shape":"ContainerUid"} + }, "Containers":{ "type":"list", "member":{"shape":"Container"} @@ -3771,6 +3847,17 @@ }, "documentation":"

        Details about the potentially impacted Amazon EC2 instance resource.

        " }, + "Ec2InstanceUid":{ + "type":"string", + "max":256, + "min":0 + }, + "Ec2InstanceUids":{ + "type":"list", + "member":{"shape":"Ec2InstanceUid"}, + "max":25, + "min":0 + }, "Ec2NetworkInterface":{ "type":"structure", "members":{ @@ -3918,6 +4005,37 @@ }, "documentation":"

        Contains information about the task in an ECS cluster.

        " }, + "EksCluster":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the Amazon EKS cluster involved in the finding.

        ", + "locationName":"arn" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp indicating when the Amazon EKS cluster was created, in UTC format.

        ", + "locationName":"createdAt" + }, + "Status":{ + "shape":"ClusterStatus", + "documentation":"

        The current status of the Amazon EKS cluster.

        ", + "locationName":"status" + }, + "VpcId":{ + "shape":"String", + "documentation":"

        The ID of the Amazon Virtual Private Cloud (Amazon VPC) associated with the Amazon EKS cluster.

        ", + "locationName":"vpcId" + }, + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

        A list of unique identifiers for the Amazon EC2 instances that serve as worker nodes in the Amazon EKS cluster.

        ", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

        Contains information about the Amazon EKS cluster involved in a GuardDuty finding, including cluster identification, status, and network configuration.

        " + }, "EksClusterDetails":{ "type":"structure", "members":{ @@ -3957,7 +4075,8 @@ "Email":{ "type":"string", "max":64, - "min":1, + "min":6, + "pattern":"See rules in parameter description", "sensitive":true }, "EnableOrganizationAdminAccountRequest":{ @@ -4087,7 +4206,7 @@ "members":{ "CriterionKey":{ "shape":"CriterionKey", - "documentation":"

        An enum value representing possible scan properties to match with given scan entries.

        Replace the enum value CLUSTER_NAME with EKS_CLUSTER_NAME. CLUSTER_NAME has been deprecated.

        ", + "documentation":"

        An enum value representing possible scan properties to match with given scan entries.

        ", "locationName":"criterionKey" }, "FilterCondition":{ @@ -4177,7 +4296,7 @@ }, "Region":{ "shape":"String", - "documentation":"

        The Region where the finding was generated.

        ", + "documentation":"

        The Region where the finding was generated. For findings generated from Global Service Events, the Region value in the finding might differ from the Region where GuardDuty identifies the potential threat. For more information, see How GuardDuty handles Amazon Web Services CloudTrail global events in the Amazon GuardDuty User Guide.

        ", "locationName":"region" }, "Resource":{ @@ -4258,7 +4377,10 @@ "EC2_NETWORK_INTERFACE", "S3_BUCKET", "S3_OBJECT", - "ACCESS_KEY" + "ACCESS_KEY", + "EKS_CLUSTER", + "KUBERNETES_WORKLOAD", + "CONTAINER" ] }, "FindingStatisticType":{ @@ -5244,7 +5366,13 @@ "ATTACK_TECHNIQUE", "UNUSUAL_API_FOR_ACCOUNT", "UNUSUAL_ASN_FOR_ACCOUNT", - "UNUSUAL_ASN_FOR_USER" + "UNUSUAL_ASN_FOR_USER", + "SUSPICIOUS_PROCESS", + "MALICIOUS_DOMAIN", + "MALICIOUS_PROCESS", + "CRYPTOMINING_IP", + "CRYPTOMINING_DOMAIN", + "CRYPTOMINING_PROCESS" ] }, "IndicatorValueString":{ @@ -5659,6 +5787,19 @@ }, "documentation":"

        Information about the Kubernetes API for which you check if you have permission to call.

        " }, + "KubernetesResourcesTypes":{ + "type":"string", + "enum":[ + "PODS", + "JOBS", + "CRONJOBS", + "DEPLOYMENTS", + "DAEMONSETS", + "STATEFULSETS", + "REPLICASETS", + "REPLICATIONCONTROLLERS" + ] + }, "KubernetesRoleBindingDetails":{ "type":"structure", "members":{ @@ -5742,6 +5883,27 @@ }, "documentation":"

        Details about the Kubernetes user involved in a Kubernetes finding.

        " }, + "KubernetesWorkload":{ + "type":"structure", + "members":{ + "ContainerUids":{ + "shape":"ContainerUids", + "documentation":"

        A list of unique identifiers for the containers that are part of the Kubernetes workload.

        ", + "locationName":"containerUids" + }, + "Namespace":{ + "shape":"String", + "documentation":"

        The Kubernetes namespace in which the workload is running, providing logical isolation within the cluster.

        ", + "locationName":"namespace" + }, + "KubernetesResourcesTypes":{ + "shape":"KubernetesResourcesTypes", + "documentation":"

        The types of Kubernetes resources involved in the workload.

        ", + "locationName":"type" + } + }, + "documentation":"

        Contains information about Kubernetes workloads involved in a GuardDuty finding, including pods, deployments, and other Kubernetes resources.

        " + }, "KubernetesWorkloadDetails":{ "type":"structure", "members":{ @@ -7620,6 +7782,21 @@ }, "documentation":"

        Information about the observed process.

        " }, + "ProcessName":{ + "type":"string", + "max":4096, + "min":0 + }, + "ProcessPath":{ + "type":"string", + "max":4096, + "min":0 + }, + "ProcessSha256":{ + "type":"string", + "max":1024, + "min":0 + }, "ProductCode":{ "type":"structure", "members":{ @@ -8018,6 +8195,21 @@ "shape":"S3Object", "documentation":"

        Contains information about the Amazon S3 object.

        ", "locationName":"s3Object" + }, + "EksCluster":{ + "shape":"EksCluster", + "documentation":"

        Contains detailed information about the Amazon EKS cluster associated with the activity that prompted GuardDuty to generate a finding.

        ", + "locationName":"eksCluster" + }, + "KubernetesWorkload":{ + "shape":"KubernetesWorkload", + "documentation":"

        Contains detailed information about the Kubernetes workload associated with the activity that prompted GuardDuty to generate a finding.

        ", + "locationName":"kubernetesWorkload" + }, + "Container":{ + "shape":"ContainerFindingResource", + "documentation":"

        Contains detailed information about the container associated with the activity that prompted GuardDuty to generate a finding.

        ", + "locationName":"container" } }, "documentation":"

        Contains information about the Amazon Web Services resource that is associated with the activity that prompted GuardDuty to generate a finding.

        " @@ -8883,6 +9075,11 @@ "shape":"Indicators", "documentation":"

        Contains information about the indicators observed in the attack sequence.

        ", "locationName":"sequenceIndicators" + }, + "AdditionalSequenceTypes":{ + "shape":"AdditionalSequenceTypes", + "documentation":"

        Additional types of sequences that may be associated with the attack sequence finding, providing further context about the nature of the detected threat.

        ", + "locationName":"additionalSequenceTypes" } }, "documentation":"

        Contains information about the GuardDuty attack sequence finding.

        " @@ -9139,7 +9336,11 @@ "enum":[ "FINDING", "CLOUD_TRAIL", - "S3_DATA_EVENTS" + "S3_DATA_EVENTS", + "EKS_AUDIT_LOGS", + "FLOW_LOGS", + "DNS_LOGS", + "RUNTIME_MONITORING" ] }, "Signals":{ diff --git a/services/health/pom.xml b/services/health/pom.xml index 1b22271688a6..e924c05177f3 100644 --- a/services/health/pom.xml +++ b/services/health/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT health AWS Java SDK :: Services :: AWS Health APIs and Notifications diff --git a/services/health/src/main/resources/codegen-resources/customization.config b/services/health/src/main/resources/codegen-resources/customization.config index 7f02f5376523..dbff7768d1f9 100644 --- a/services/health/src/main/resources/codegen-resources/customization.config +++ b/services/health/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "describeEntityAggregates", "describeEventTypes" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/healthlake/pom.xml b/services/healthlake/pom.xml index 9a7ae28d9ab3..e09d8b96f4bc 100644 --- a/services/healthlake/pom.xml +++ b/services/healthlake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT healthlake AWS Java SDK :: Services :: Health Lake diff --git a/services/healthlake/src/main/resources/codegen-resources/customization.config b/services/healthlake/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/healthlake/src/main/resources/codegen-resources/customization.config +++ b/services/healthlake/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iam/pom.xml b/services/iam/pom.xml index 
b31c763ce7de..afa91ca7d3f5 100644 --- a/services/iam/pom.xml +++ b/services/iam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iam AWS Java SDK :: Services :: AWS IAM diff --git a/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json index 9f9403833d3c..399734ee9b30 100644 --- a/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/iam/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -819,6 +819,56 @@ }, "type": "endpoint" }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-eusc" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://iam.eusc-de-east-1.amazonaws.eu", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eusc-de-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { diff --git a/services/iam/src/main/resources/codegen-resources/service-2.json b/services/iam/src/main/resources/codegen-resources/service-2.json index d207cca2521b..56eba9e361ea 100644 --- a/services/iam/src/main/resources/codegen-resources/service-2.json +++ b/services/iam/src/main/resources/codegen-resources/service-2.json @@ -124,7 +124,7 @@ {"shape":"PasswordPolicyViolationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Changes the password of the IAM user who is calling this operation. This operation can be performed using the CLI, the Amazon Web Services API, or the My Security Credentials page in the Amazon Web Services Management Console. The Amazon Web Services account root user password is not affected by this operation.

        Use UpdateLoginProfile to use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. For more information about modifying passwords, see Managing passwords in the IAM User Guide.

        " + "documentation":"

        Changes the password of the IAM user who is calling this operation. This operation can be performed using the CLI, the Amazon Web Services API, or the My Security Credentials page in the Amazon Web Services Management Console. The Amazon Web Services account root user password is not affected by this operation.

        Use UpdateLoginProfile to use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. For more information about modifying passwords, see Managing passwords in the IAM User Guide.

        " }, "CreateAccessKey":{ "name":"CreateAccessKey", @@ -216,7 +216,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Creates a password for the specified IAM user. A password allows an IAM user to access Amazon Web Services services through the Amazon Web Services Management Console.

        You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to create a password for any IAM user. Use ChangePassword to update your own existing password in the My Security Credentials page in the Amazon Web Services Management Console.

        For more information about managing passwords, see Managing passwords in the IAM User Guide.

        " + "documentation":"

        Creates a password for the specified IAM user. A password allows an IAM user to access Amazon Web Services services through the Amazon Web Services Management Console.

        You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to create a password for any IAM user. Use ChangePassword to update your own existing password in the My Security Credentials page in the Amazon Web Services Management Console.

        For more information about managing passwords, see Managing passwords in the IAM User Guide.

        " }, "CreateOpenIDConnectProvider":{ "name":"CreateOpenIDConnectProvider", @@ -237,7 +237,7 @@ {"shape":"ServiceFailureException"}, {"shape":"OpenIdIdpCommunicationErrorException"} ], - "documentation":"

        Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

        The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

        If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

        When you create the IAM OIDC provider, you specify the following:

        • The URL of the OIDC identity provider (IdP) to trust

        • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

        • A list of tags that are attached to the specified IAM OIDC provider

        • A list of thumbprints of one or more server certificates that the IdP uses

        You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

        Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration.

        The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

        " + "documentation":"

        Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

        The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

        If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

        When you create the IAM OIDC provider, you specify the following:

        • The URL of the OIDC identity provider (IdP) to trust

        • A list of client IDs (also known as audiences) that identify the application or applications allowed to authenticate using the OIDC provider

        • A list of tags that are attached to the specified IAM OIDC provider

        • A list of thumbprints of one or more server certificates that the IdP uses

        You get all of this information from the OIDC IdP you want to use to access Amazon Web Services.

        Amazon Web Services secures communication with OIDC identity providers (IdPs) using our library of trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of these trusted CAs, only then we secure communication using the thumbprints set in the IdP's configuration.

        The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

        " }, "CreatePolicy":{ "name":"CreatePolicy", @@ -278,7 +278,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

        Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

        For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

        " + "documentation":"

        Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

        Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

        For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

        " }, "CreateRole":{ "name":"CreateRole", @@ -356,7 +356,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceNotSupportedException"} ], - "documentation":"

        Generates a set of credentials consisting of a user name and password that can be used to access the service specified in the request. These credentials are generated by IAM, and can be used only for the specified service.

        You can have a maximum of two sets of service-specific credentials for each supported service per user.

        You can create service-specific credentials for CodeCommit and Amazon Keyspaces (for Apache Cassandra).

        You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential.

        For more information about service-specific credentials, see Using IAM with CodeCommit: Git credentials, SSH keys, and Amazon Web Services access keys in the IAM User Guide.

        " + "documentation":"

        Generates a set of credentials consisting of a user name and password that can be used to access the service specified in the request. These credentials are generated by IAM, and can be used only for the specified service.

        You can have a maximum of two sets of service-specific credentials for each supported service per user.

        You can create service-specific credentials for Amazon Bedrock, CodeCommit and Amazon Keyspaces (for Apache Cassandra).

        You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential.

        For more information about service-specific credentials, see Service-specific credentials for IAM users in the IAM User Guide.

        " }, "CreateUser":{ "name":"CreateUser", @@ -397,7 +397,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Creates a new virtual MFA device for the Amazon Web Services account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, see Using a virtual MFA device in the IAM User Guide.

        For information about the maximum number of MFA devices you can create, see IAM and STS quotas in the IAM User Guide.

        The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your Amazon Web Services access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.

        " + "documentation":"

        Creates a new virtual MFA device for the Amazon Web Services account. After creating the virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. For more information about creating and working with virtual MFA devices, see Using a virtual MFA device in the IAM User Guide.

        For information about the maximum number of MFA devices you can create, see IAM and STS quotas in the IAM User Guide.

        The seed information contained in the QR code and the Base32 string should be treated like any other secret access information. In other words, protect the seed information as you would your Amazon Web Services access keys or your passwords. After you provision your virtual device, you should ensure that the information is destroyed following secure procedures.

        " }, "DeactivateMFADevice":{ "name":"DeactivateMFADevice", @@ -484,7 +484,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM group.

        A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM group.

        A group can also have managed policies attached to it. To detach a managed policy from a group, use DetachGroupPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "DeleteInstanceProfile":{ "name":"DeleteInstanceProfile", @@ -514,7 +514,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the password for the specified IAM user, For more information, see Managing passwords for IAM users.

        You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to delete a password for any IAM user. You can use ChangePassword to update, but not delete, your own password in the My Security Credentials page in the Amazon Web Services Management Console.

        Deleting a user's password does not prevent a user from accessing Amazon Web Services through the command line interface or the API. To prevent all user access, you must also either make any access keys inactive or delete them. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.

        " + "documentation":"

        Deletes the password for the specified IAM user or root user, For more information, see Managing passwords for IAM users.

        You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to delete a password for any IAM user. You can use ChangePassword to update, but not delete, your own password in the My Security Credentials page in the Amazon Web Services Management Console.

        Deleting a user's password does not prevent a user from accessing Amazon Web Services through the command line interface or the API. To prevent all user access, you must also either make any access keys inactive or delete them. For more information about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.

        " }, "DeleteOpenIDConnectProvider":{ "name":"DeleteOpenIDConnectProvider", @@ -544,7 +544,7 @@ {"shape":"DeleteConflictException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified managed policy.

        Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition, you must delete all the policy's versions. The following steps describe the process for deleting a managed policy:

        • Detach the policy from all users, groups, and roles that the policy is attached to, using DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy. To list all the users, groups, and roles that a policy is attached to, use ListEntitiesForPolicy.

        • Delete all versions of the policy using DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version that is marked as the default version. You delete the policy's default version in the next step of the process.

        • Delete the policy (this automatically deletes the policy's default version) using this operation.

        For information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Deletes the specified managed policy.

        Before you can delete a managed policy, you must first detach the policy from all users, groups, and roles that it is attached to. In addition, you must delete all the policy's versions. The following steps describe the process for deleting a managed policy:

        • Detach the policy from all users, groups, and roles that the policy is attached to, using DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy. To list all the users, groups, and roles that a policy is attached to, use ListEntitiesForPolicy.

        • Delete all versions of the policy using DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version that is marked as the default version. You delete the policy's default version in the next step of the process.

        • Delete the policy (this automatically deletes the policy's default version) using this operation.

        For information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "DeletePolicyVersion":{ "name":"DeletePolicyVersion", @@ -560,7 +560,7 @@ {"shape":"DeleteConflictException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified version from the specified managed policy.

        You cannot delete the default version from a policy using this operation. To delete the default version from a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

        For information about versions for managed policies, see Versioning for managed policies in the IAM User Guide.

        " + "documentation":"

        Deletes the specified version from the specified managed policy.

        You cannot delete the default version from a policy using this operation. To delete the default version from a policy, use DeletePolicy. To find out which version of a policy is marked as the default version, use ListPolicyVersions.

        For information about versions for managed policies, see Versioning for managed policies in the IAM User Guide.

        " }, "DeleteRole":{ "name":"DeleteRole", @@ -577,7 +577,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified role. Unlike the Amazon Web Services Management Console, when you delete a role programmatically, you must delete the items attached to the role manually, or the deletion fails. For more information, see Deleting an IAM role. Before attempting to delete a role, remove the following attached items:

        Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

        " + "documentation":"

        Deletes the specified role. Unlike the Amazon Web Services Management Console, when you delete a role programmatically, you must delete the items attached to the role manually, or the deletion fails. For more information, see Deleting an IAM role. Before attempting to delete a role, remove the following attached items:

        Make sure that you do not have any Amazon EC2 instances running with the role you are about to delete. Deleting a role or instance profile that is associated with a running instance will break any applications running on the instance.

        " }, "DeleteRolePermissionsBoundary":{ "name":"DeleteRolePermissionsBoundary", @@ -606,7 +606,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM role.

        A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM role.

        A role can also have managed policies attached to it. To detach a managed policy from a role, use DetachRolePolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "DeleteSAMLProvider":{ "name":"DeleteSAMLProvider", @@ -666,7 +666,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Submits a service-linked role deletion request and returns a DeletionTaskId, which you can use to check the status of the deletion. Before you call this operation, confirm that the role has no active sessions and that any resources used by the role in the linked service are deleted. If you call this operation more than once for the same service-linked role and an earlier deletion task is not complete, then the DeletionTaskId of the earlier request is returned.

        If you submit a deletion request for a service-linked role whose linked service is still accessing a resource, then the deletion task fails. If it fails, the GetServiceLinkedRoleDeletionStatus operation returns the reason for the failure, usually including the resources that must be deleted. To delete the service-linked role, you must first remove those resources from the linked service and then submit the deletion request again. Resources are specific to the service that is linked to the role. For more information about removing resources from a service, see the Amazon Web Services documentation for your service.

        For more information about service-linked roles, see Roles terms and concepts: Amazon Web Services service-linked role in the IAM User Guide.

        " + "documentation":"

        Submits a service-linked role deletion request and returns a DeletionTaskId, which you can use to check the status of the deletion. Before you call this operation, confirm that the role has no active sessions and that any resources used by the role in the linked service are deleted. If you call this operation more than once for the same service-linked role and an earlier deletion task is not complete, then the DeletionTaskId of the earlier request is returned.

        If you submit a deletion request for a service-linked role whose linked service is still accessing a resource, then the deletion task fails. If it fails, the GetServiceLinkedRoleDeletionStatus operation returns the reason for the failure, usually including the resources that must be deleted. To delete the service-linked role, you must first remove those resources from the linked service and then submit the deletion request again. Resources are specific to the service that is linked to the role. For more information about removing resources from a service, see the Amazon Web Services documentation for your service.

        For more information about service-linked roles, see Roles terms and concepts: Amazon Web Services service-linked role in the IAM User Guide.

        " }, "DeleteServiceSpecificCredential":{ "name":"DeleteServiceSpecificCredential", @@ -709,7 +709,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified IAM user. Unlike the Amazon Web Services Management Console, when you delete a user programmatically, you must delete the items attached to the user manually, or the deletion fails. For more information, see Deleting an IAM user. Before attempting to delete a user, remove the following items:

        " + "documentation":"

        Deletes the specified IAM user. Unlike the Amazon Web Services Management Console, when you delete a user programmatically, you must delete the items attached to the user manually, or the deletion fails. For more information, see Deleting an IAM user. Before attempting to delete a user, remove the following items:

        " }, "DeleteUserPermissionsBoundary":{ "name":"DeleteUserPermissionsBoundary", @@ -736,7 +736,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM user.

        A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Deletes the specified inline policy that is embedded in the specified IAM user.

        A user can also have managed policies attached to it. To detach a managed policy from a user, use DetachUserPolicy. For more information about policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "DeleteVirtualMFADevice":{ "name":"DeleteVirtualMFADevice", @@ -752,7 +752,7 @@ {"shape":"ServiceFailureException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

        Deletes a virtual MFA device.

        You must deactivate a user's virtual MFA device before you can delete it. For information about deactivating MFA devices, see DeactivateMFADevice.

        " + "documentation":"

        Deletes a virtual MFA device.

        You must deactivate a user's virtual MFA device before you can delete it. For information about deactivating MFA devices, see DeactivateMFADevice.

        " }, "DetachGroupPolicy":{ "name":"DetachGroupPolicy", @@ -767,7 +767,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Removes the specified managed policy from the specified IAM group.

        A group can also have inline policies embedded with it. To delete an inline policy, use DeleteGroupPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Removes the specified managed policy from the specified IAM group.

        A group can also have inline policies embedded with it. To delete an inline policy, use DeleteGroupPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "DetachRolePolicy":{ "name":"DetachRolePolicy", @@ -783,7 +783,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Removes the specified managed policy from the specified role.

        A role can also have inline policies embedded with it. To delete an inline policy, use DeleteRolePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Removes the specified managed policy from the specified role.

        A role can also have inline policies embedded with it. To delete an inline policy, use DeleteRolePolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "DetachUserPolicy":{ "name":"DetachUserPolicy", @@ -798,7 +798,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Removes the specified managed policy from the specified user.

        A user can also have inline policies embedded with it. To delete an inline policy, use DeleteUserPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Removes the specified managed policy from the specified user.

        A user can also have inline policies embedded with it. To delete an inline policy, use DeleteUserPolicy. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "DisableOrganizationsRootCredentialsManagement":{ "name":"DisableOrganizationsRootCredentialsManagement", @@ -926,7 +926,7 @@ "errors":[ {"shape":"ReportGenerationLimitExceededException"} ], - "documentation":"

        Generates a report for service last accessed data for Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

        To call this operation, you must be signed in using your Organizations management account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and Organizations permissions. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

        You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

        You can generate a service last accessed data report for a policy by specifying an entity's path and an optional Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

        For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions see Reducing permissions using service last accessed data in the IAM User Guide.

        The data includes all attempts to access Amazon Web Services, not just the successful ones. This includes all attempts that were made using the Amazon Web Services Management Console, the Amazon Web Services API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

        This operation returns a JobId. Use this parameter in the GetOrganizationsAccessReport operation to check the status of the report generation. To check the status of this request, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

        To generate a service last accessed data report for entities, specify an entity path without specifying the optional Organizations policy ID. The type of entity that you specify determines the data returned in the report.

        • Root – When you specify the organizations root as the entity, the resulting report lists all of the services allowed by SCPs that are attached to your root. For each service, the report includes data for all accounts in your organization except the management account, because the management account is not limited by SCPs.

        • OU – When you specify an organizational unit (OU) as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the OU and its parents. For each service, the report includes data for all accounts in the OU or its children. This data excludes the management account, because the management account is not limited by SCPs.

        • management account – When you specify the management account, the resulting report lists all Amazon Web Services services, because the management account is not limited by SCPs. For each service, the report includes data for only the management account.

        • Account – When you specify another account as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the account and its parents. For each service, the report includes data for only the specified account.

        To generate a service last accessed data report for policies, specify an entity path and the optional Organizations policy ID. The type of entity that you specify determines the data returned for each service.

        • Root – When you specify the root entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in your organization to which the SCP applies. This data excludes the management account, because the management account is not limited by SCPs. If the SCP is not attached to any entities in the organization, then the report will return a list of services with no data.

        • OU – When you specify an OU entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in the OU or its children to which the SCP applies. This means that other accounts outside the OU that are affected by the SCP might not be included in the data. This data excludes the management account, because the management account is not limited by SCPs. If the SCP is not attached to the OU or one of its children, the report will return a list of services with no data.

        • management account – When you specify the management account, the resulting report lists all Amazon Web Services services, because the management account is not limited by SCPs. If you specify a policy ID in the CLI or API, the policy is ignored. For each service, the report includes data for only the management account.

        • Account – When you specify another account entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for only the specified account. This means that other accounts in the organization that are affected by the SCP might not be included in the data. If the SCP is not attached to the account, the report will return a list of services with no data.

        Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        For more information about service last accessed data, see Reducing policy scope by viewing user activity in the IAM User Guide.

        " + "documentation":"

        Generates a report for service last accessed data for Organizations. You can generate a report for any entities (organization root, organizational unit, or account) or policies in your organization.

        To call this operation, you must be signed in using your Organizations management account credentials. You can use your long-term IAM user or root user credentials, or temporary credentials from assuming an IAM role. SCPs must be enabled for your organization root. You must have the required IAM and Organizations permissions. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

        You can generate a service last accessed data report for entities by specifying only the entity's path. This data includes a list of services that are allowed by any service control policies (SCPs) that apply to the entity.

        You can generate a service last accessed data report for a policy by specifying an entity's path and an optional Organizations policy ID. This data includes a list of services that are allowed by the specified SCP.

        For each service in both report types, the data includes the most recent account activity that the policy allows to account principals in the entity or the entity's children. For important information about the data, reporting period, permissions required, troubleshooting, and supported Regions see Reducing permissions using service last accessed data in the IAM User Guide.

        The data includes all attempts to access Amazon Web Services, not just the successful ones. This includes all attempts that were made using the Amazon Web Services Management Console, the Amazon Web Services API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that an account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

        This operation returns a JobId. Use this parameter in the GetOrganizationsAccessReport operation to check the status of the report generation. To check the status of this request, use the JobId parameter in the GetOrganizationsAccessReport operation and test the JobStatus response parameter. When the job is complete, you can retrieve the report.

        To generate a service last accessed data report for entities, specify an entity path without specifying the optional Organizations policy ID. The type of entity that you specify determines the data returned in the report.

        • Root – When you specify the organizations root as the entity, the resulting report lists all of the services allowed by SCPs that are attached to your root. For each service, the report includes data for all accounts in your organization except the management account, because the management account is not limited by SCPs.

        • OU – When you specify an organizational unit (OU) as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the OU and its parents. For each service, the report includes data for all accounts in the OU or its children. This data excludes the management account, because the management account is not limited by SCPs.

        • management account – When you specify the management account, the resulting report lists all Amazon Web Services services, because the management account is not limited by SCPs. For each service, the report includes data for only the management account.

        • Account – When you specify another account as the entity, the resulting report lists all of the services allowed by SCPs that are attached to the account and its parents. For each service, the report includes data for only the specified account.

        To generate a service last accessed data report for policies, specify an entity path and the optional Organizations policy ID. The type of entity that you specify determines the data returned for each service.

        • Root – When you specify the root entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in your organization to which the SCP applies. This data excludes the management account, because the management account is not limited by SCPs. If the SCP is not attached to any entities in the organization, then the report will return a list of services with no data.

        • OU – When you specify an OU entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for all accounts in the OU or its children to which the SCP applies. This means that other accounts outside the OU that are affected by the SCP might not be included in the data. This data excludes the management account, because the management account is not limited by SCPs. If the SCP is not attached to the OU or one of its children, the report will return a list of services with no data.

        • management account – When you specify the management account, the resulting report lists all Amazon Web Services services, because the management account is not limited by SCPs. If you specify a policy ID in the CLI or API, the policy is ignored. For each service, the report includes data for only the management account.

        • Account – When you specify another account entity and a policy ID, the resulting report lists all of the services that are allowed by the specified SCP. For each service, the report includes data for only the specified account. This means that other accounts in the organization that are affected by the SCP might not be included in the data. If the SCP is not attached to the account, the report will return a list of services with no data.

        Service last accessed data does not use other policy types when determining whether a principal could access a service. These other policy types include identity-based policies, resource-based policies, access control lists, IAM permissions boundaries, and STS assume role policies. It only applies SCP logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        For more information about service last accessed data, see Reducing policy scope by viewing user activity in the IAM User Guide.

        " }, "GenerateServiceLastAccessedDetails":{ "name":"GenerateServiceLastAccessedDetails", @@ -943,7 +943,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

        Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access Amazon Web Services services. Recent activity usually appears within four hours. IAM reports activity for at least the last 400 days, or less if your Region began supporting this feature within the last year. For more information, see Regions where data is tracked. For more information about services and actions for which action last accessed information is displayed, see IAM action last accessed information services and actions.

        The service last accessed data includes all attempts to access an Amazon Web Services API, not just the successful ones. This includes all attempts that were made using the Amazon Web Services Management Console, the Amazon Web Services API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

        The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

        • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every Amazon Web Services service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

          The JobId returned by GenerateServiceLastAccessedDetail must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetail.

        • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific Amazon Web Services service.

        To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

        For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

        Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, Organizations policies, IAM permissions boundaries, and STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        For more information about service and action last accessed data, see Reducing permissions using service last accessed data in the IAM User Guide.

        " + "documentation":"

        Generates a report that includes details about when an IAM resource (user, group, role, or policy) was last used in an attempt to access Amazon Web Services services. Recent activity usually appears within four hours. IAM reports activity for at least the last 400 days, or less if your Region began supporting this feature within the last year. For more information, see Regions where data is tracked. For more information about services and actions for which action last accessed information is displayed, see IAM action last accessed information services and actions.

        The service last accessed data includes all attempts to access an Amazon Web Services API, not just the successful ones. This includes all attempts that were made using the Amazon Web Services Management Console, the Amazon Web Services API through any of the SDKs, or any of the command line tools. An unexpected entry in the service last accessed data does not mean that your account has been compromised, because the request might have been denied. Refer to your CloudTrail logs as the authoritative source for information about all API calls and whether they were successful or denied access. For more information, see Logging IAM events with CloudTrail in the IAM User Guide.

        The GenerateServiceLastAccessedDetails operation returns a JobId. Use this parameter in the following operations to retrieve the following details from your report:

        • GetServiceLastAccessedDetails – Use this operation for users, groups, roles, or policies to list every Amazon Web Services service that the resource could access using permissions policies. For each service, the response includes information about the most recent access attempt.

          The JobId returned by GenerateServiceLastAccessedDetails must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetails.

        • GetServiceLastAccessedDetailsWithEntities – Use this operation for groups and policies to list information about the associated entities (users or roles) that attempted to access a specific Amazon Web Services service.

        To check the status of the GenerateServiceLastAccessedDetails request, use the JobId parameter in the same operations and test the JobStatus response parameter.

        For additional information about the permissions policies that allow an identity (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess operation.

        Service last accessed data does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, Organizations policies, IAM permissions boundaries, and STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        For more information about service and action last accessed data, see Reducing permissions using service last accessed data in the IAM User Guide.

        " }, "GetAccessKeyLastUsed":{ "name":"GetAccessKeyLastUsed", @@ -972,7 +972,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves information about all IAM users, groups, roles, and policies in your Amazon Web Services account, including their relationships to one another. Use this operation to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.

        " + "documentation":"

        Retrieves information about all IAM users, groups, roles, and policies in your Amazon Web Services account, including their relationships to one another. Use this operation to obtain a snapshot of the configuration of IAM permissions (users, groups, roles, and policies) in your account.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        You can optionally filter the results using the Filter parameter. You can paginate the results using the MaxItems and Marker parameters.

        " }, "GetAccountPasswordPolicy":{ "name":"GetAccountPasswordPolicy", @@ -1019,7 +1019,7 @@ "errors":[ {"shape":"InvalidInputException"} ], - "documentation":"

        Gets a list of all of the context keys referenced in the input policies. The policies are supplied as a list of one or more strings. To get the context keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy to understand what key names and values you must supply when you call SimulateCustomPolicy. Note that all parameters are shown in unencoded form here for clarity but must be URL encoded to be included as a part of a real HTML request.

        " + "documentation":"

        Gets a list of all of the context keys referenced in the input policies. The policies are supplied as a list of one or more strings. To get the context keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy to understand what key names and values you must supply when you call SimulateCustomPolicy. Note that all parameters are shown in unencoded form here for clarity but must be URL encoded to be included as a part of a real HTTP request.

        " }, "GetContextKeysForPrincipalPolicy":{ "name":"GetContextKeysForPrincipalPolicy", @@ -1036,7 +1036,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

        Gets a list of all of the context keys referenced in all the IAM policies that are attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

        You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

        Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

        " + "documentation":"

        Gets a list of all of the context keys referenced in all the IAM policies that are attached to the specified IAM entity. The entity can be an IAM user, group, or role. If you specify a user, then the request also includes all of the policies attached to groups that the user is a member of.

        You can optionally include a list of one or more additional policies, specified as strings. If you want to include only a list of policies by string, use GetContextKeysForCustomPolicy instead.

        Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use GetContextKeysForCustomPolicy instead.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. Context keys can be evaluated by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy to understand what key names and values you must supply when you call SimulatePrincipalPolicy.

        " }, "GetCredentialReport":{ "name":"GetCredentialReport", @@ -1088,7 +1088,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves the specified inline policy document that is embedded in the specified IAM group.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        An IAM group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Retrieves the specified inline policy document that is embedded in the specified IAM group.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        An IAM group can also have managed policies attached to it. To retrieve a managed policy document that is attached to a group, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "GetInstanceProfile":{ "name":"GetInstanceProfile", @@ -1173,7 +1173,7 @@ "errors":[ {"shape":"NoSuchEntityException"} ], - "documentation":"

        Retrieves the service last accessed data report for Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

        Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

        To call this operation, you must be signed in to the management account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

        For each service that principals in an account (root user, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

        By default, the list is sorted by service namespace.

        " + "documentation":"

        Retrieves the service last accessed data report for Organizations that was previously generated using the GenerateOrganizationsAccessReport operation. This operation retrieves the status of your report job and the report contents.

        Depending on the parameters that you passed when you generated the report, the data returned could include different information. For details, see GenerateOrganizationsAccessReport.

        To call this operation, you must be signed in to the management account in your organization. SCPs must be enabled for your organization root. You must have permissions to perform this operation. For more information, see Refining permissions using service last accessed data in the IAM User Guide.

        For each service that principals in an account (root user, IAM users, or IAM roles) could access using SCPs, the operation returns details about the most recent access attempt. If there was no attempt, the service is listed without details about the most recent attempt to access the service. If the operation fails, it returns the reason that it failed.

        By default, the list is sorted by service namespace.

        " }, "GetPolicy":{ "name":"GetPolicy", @@ -1191,7 +1191,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy. This operation returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

        This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Retrieves information about the specified managed policy, including the policy's default version and the total number of IAM users, groups, and roles to which the policy is attached. To retrieve the list of the specific users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy. This operation returns metadata about the policy. To retrieve the actual policy document for a specific version of the policy, use GetPolicyVersion.

        This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded with an IAM user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "GetPolicyVersion":{ "name":"GetPolicyVersion", @@ -1209,7 +1209,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves information about the specified version of the specified managed policy, including the policy document.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        To list the available versions for a policy, use ListPolicyVersions.

        This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

        For more information about the types of policies, see Managed policies and inline policies in the IAM User Guide.

        For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

        " + "documentation":"

        Retrieves information about the specified version of the specified managed policy, including the policy document.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        To list the available versions for a policy, use ListPolicyVersions.

        This operation retrieves information about managed policies. To retrieve information about an inline policy that is embedded in a user, group, or role, use GetUserPolicy, GetGroupPolicy, or GetRolePolicy.

        For more information about the types of policies, see Managed policies and inline policies in the IAM User Guide.

        For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

        " }, "GetRole":{ "name":"GetRole", @@ -1226,7 +1226,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see IAM roles in the IAM User Guide.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        " + "documentation":"

        Retrieves information about the specified role, including the role's path, GUID, ARN, and the role's trust policy that grants permission to assume the role. For more information about roles, see IAM roles in the IAM User Guide.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        " }, "GetRolePolicy":{ "name":"GetRolePolicy", @@ -1243,7 +1243,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves the specified inline policy document that is embedded with the specified IAM role.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        For more information about roles, see IAM roles in the IAM User Guide.

        " + "documentation":"

        Retrieves the specified inline policy document that is embedded with the specified IAM role.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        An IAM role can also have managed policies attached to it. To retrieve a managed policy document that is attached to a role, use GetPolicy to determine the policy's default version, then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        For more information about roles, see IAM roles in the IAM User Guide.

        " }, "GetSAMLProvider":{ "name":"GetSAMLProvider", @@ -1347,7 +1347,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves the status of your service-linked role deletion. After you use DeleteServiceLinkedRole to submit a service-linked role for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus to check the status of the deletion. If the deletion fails, this operation returns the reason that it failed, if that information is returned by the service.

        " + "documentation":"

        Retrieves the status of your service-linked role deletion. After you use DeleteServiceLinkedRole to submit a service-linked role for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus to check the status of the deletion. If the deletion fails, this operation returns the reason that it failed, if that information is returned by the service.

        " }, "GetUser":{ "name":"GetUser", @@ -1381,7 +1381,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Retrieves the specified inline policy document that is embedded in the specified IAM user.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        An IAM user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version. Then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Retrieves the specified inline policy document that is embedded in the specified IAM user.

        Policies returned by this operation are URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality, and some SDKs do this decoding automatically.

        An IAM user can also have managed policies attached to it. To retrieve a managed policy document that is attached to a user, use GetPolicy to determine the policy's default version. Then use GetPolicyVersion to retrieve the policy document.

        For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "ListAccessKeys":{ "name":"ListAccessKeys", @@ -1432,7 +1432,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists all managed policies that are attached to the specified IAM group.

        An IAM group can also have inline policies embedded with it. To list the inline policies for a group, use ListGroupPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

        " + "documentation":"

        Lists all managed policies that are attached to the specified IAM group.

        An IAM group can also have inline policies embedded with it. To list the inline policies for a group, use ListGroupPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

        " }, "ListAttachedRolePolicies":{ "name":"ListAttachedRolePolicies", @@ -1450,7 +1450,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists all managed policies that are attached to the specified IAM role.

        An IAM role can also have inline policies embedded with it. To list the inline policies for a role, use ListRolePolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the operation returns an empty list.

        " + "documentation":"

        Lists all managed policies that are attached to the specified IAM role.

        An IAM role can also have inline policies embedded with it. To list the inline policies for a role, use ListRolePolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified role (or none that match the specified path prefix), the operation returns an empty list.

        " }, "ListAttachedUserPolicies":{ "name":"ListAttachedUserPolicies", @@ -1468,7 +1468,7 @@ {"shape":"InvalidInputException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists all managed policies that are attached to the specified IAM user.

        An IAM user can also have inline policies embedded with it. To list the inline policies for a user, use ListUserPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified group (or none that match the specified path prefix), the operation returns an empty list.

        " + "documentation":"

        Lists all managed policies that are attached to the specified IAM user.

        An IAM user can also have inline policies embedded with it. To list the inline policies for a user, use ListUserPolicies. For information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. You can use the PathPrefix parameter to limit the list of policies to only those matching the specified path prefix. If there are no policies attached to the specified user (or none that match the specified path prefix), the operation returns an empty list.

        " }, "ListEntitiesForPolicy":{ "name":"ListEntitiesForPolicy", @@ -1503,7 +1503,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the names of the inline policies that are embedded in the specified IAM group.

        An IAM group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the operation returns an empty list.

        " + "documentation":"

        Lists the names of the inline policies that are embedded in the specified IAM group.

        An IAM group can also have managed policies attached to it. To list the managed policies that are attached to a group, use ListAttachedGroupPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified group, the operation returns an empty list.

        " }, "ListGroups":{ "name":"ListGroups", @@ -1569,7 +1569,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an instance profile, see GetInstanceProfile.

        You can paginate the results using the MaxItems and Marker parameters.

        " + "documentation":"

        Lists the instance profiles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about instance profiles, see Using instance profiles in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an instance profile, see GetInstanceProfile.

        You can paginate the results using the MaxItems and Marker parameters.

        " }, "ListInstanceProfilesForRole":{ "name":"ListInstanceProfilesForRole", @@ -1655,7 +1655,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists information about the IAM OpenID Connect (OIDC) provider resource objects defined in the Amazon Web Services account.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an OIDC provider, see GetOpenIDConnectProvider.

        " + "documentation":"

        Lists information about the IAM OpenID Connect (OIDC) provider resource objects defined in the Amazon Web Services account.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for an OIDC provider, see GetOpenIDConnectProvider.

        " }, "ListOrganizationsFeatures":{ "name":"ListOrganizationsFeatures", @@ -1690,7 +1690,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists all the managed policies that are available in your Amazon Web Services account, including your own customer-defined managed policies and all Amazon Web Services managed policies.

        You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your Amazon Web Services account, set Scope to Local. To list only Amazon Web Services managed policies, set Scope to AWS.

        You can paginate the results using the MaxItems and Marker parameters.

        For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a customer manged policy, see GetPolicy.

        " + "documentation":"

        Lists all the managed policies that are available in your Amazon Web Services account, including your own customer-defined managed policies and all Amazon Web Services managed policies.

        You can filter the list of policies that is returned using the optional OnlyAttached, Scope, and PathPrefix parameters. For example, to list only the customer managed policies in your Amazon Web Services account, set Scope to Local. To list only Amazon Web Services managed policies, set Scope to AWS.

        You can paginate the results using the MaxItems and Marker parameters.

        For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a customer managed policy, see GetPolicy.

        " }, "ListPoliciesGrantingServiceAccess":{ "name":"ListPoliciesGrantingServiceAccess", @@ -1707,7 +1707,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"InvalidInputException"} ], - "documentation":"

        Retrieves a list of policies that the IAM identity (user, group, or role) can use to access each specified service.

        This operation does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, Organizations policies, IAM permissions boundaries, and STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        The list of policies returned by the operation depends on the ARN of the identity that you provide.

        • User – The list of policies includes the managed and inline policies that are attached to the user directly. The list also includes any additional managed and inline policies that are attached to the group to which the user belongs.

        • Group – The list of policies includes only the managed and inline policies that are attached to the group directly. Policies that are attached to the group’s user are not included.

        • Role – The list of policies includes only the managed and inline policies that are attached to the role.

        For each managed policy, this operation returns the ARN and policy name. For each inline policy, it returns the policy name and the entity to which it is attached. Inline policies do not have an ARN. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

        Policies that are attached to users and roles as permissions boundaries are not returned. To view which managed policy is currently used to set the permissions boundary for a user or role, use the GetUser or GetRole operations.

        " + "documentation":"

        Retrieves a list of policies that the IAM identity (user, group, or role) can use to access each specified service.

        This operation does not use other policy types when determining whether a resource could access a service. These other policy types include resource-based policies, access control lists, Organizations policies, IAM permissions boundaries, and STS assume role policies. It only applies permissions policy logic. For more about the evaluation of policy types, see Evaluating policies in the IAM User Guide.

        The list of policies returned by the operation depends on the ARN of the identity that you provide.

        • User – The list of policies includes the managed and inline policies that are attached to the user directly. The list also includes any additional managed and inline policies that are attached to the group to which the user belongs.

        • Group – The list of policies includes only the managed and inline policies that are attached to the group directly. Policies that are attached to the group’s user are not included.

        • Role – The list of policies includes only the managed and inline policies that are attached to the role.

        For each managed policy, this operation returns the ARN and policy name. For each inline policy, it returns the policy name and the entity to which it is attached. Inline policies do not have an ARN. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

        Policies that are attached to users and roles as permissions boundaries are not returned. To view which managed policy is currently used to set the permissions boundary for a user or role, use the GetUser or GetRole operations.

        " }, "ListPolicyTags":{ "name":"ListPolicyTags", @@ -1760,7 +1760,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the names of the inline policies that are embedded in the specified IAM role.

        An IAM role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the operation returns an empty list.

        " + "documentation":"

        Lists the names of the inline policies that are embedded in the specified IAM role.

        An IAM role can also have managed policies attached to it. To list the managed policies that are attached to a role, use ListAttachedRolePolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified role, the operation returns an empty list.

        " }, "ListRoleTags":{ "name":"ListRoleTags", @@ -1793,7 +1793,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see IAM roles in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

        • PermissionsBoundary

        • RoleLastUsed

        • Tags

        To view all of the information for a role, see GetRole.

        You can paginate the results using the MaxItems and Marker parameters.

        " + "documentation":"

        Lists the IAM roles that have the specified path prefix. If there are none, the operation returns an empty list. For more information about roles, see IAM roles in the IAM User Guide.

        IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

        • PermissionsBoundary

        • RoleLastUsed

        • Tags

        To view all of the information for a role, see GetRole.

        You can paginate the results using the MaxItems and Marker parameters.

        " }, "ListSAMLProviderTags":{ "name":"ListSAMLProviderTags", @@ -1827,7 +1827,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the SAML provider resource objects defined in IAM in the account. IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a SAML provider, see GetSAMLProvider.

        This operation requires Signature Version 4.

        " + "documentation":"

        Lists the SAML provider resource objects defined in IAM in the account. IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a SAML provider, see GetSAMLProvider.

        This operation requires Signature Version 4.

        " }, "ListSSHPublicKeys":{ "name":"ListSSHPublicKeys", @@ -1876,7 +1876,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the server certificates stored in IAM that have the specified path prefix. If none exist, the operation returns an empty list.

        You can paginate the results using the MaxItems and Marker parameters.

        For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic also includes a list of Amazon Web Services services that can use the server certificates that you manage with IAM.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a servercertificate, see GetServerCertificate.

        " + "documentation":"

        Lists the server certificates stored in IAM that have the specified path prefix. If none exist, the operation returns an empty list.

        You can paginate the results using the MaxItems and Marker parameters.

        For more information about working with server certificates, see Working with server certificates in the IAM User Guide. This topic also includes a list of Amazon Web Services services that can use the server certificates that you manage with IAM.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view all of the information for a server certificate, see GetServerCertificate.

        " }, "ListServiceSpecificCredentials":{ "name":"ListServiceSpecificCredentials", @@ -1927,7 +1927,7 @@ {"shape":"NoSuchEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the names of the inline policies embedded in the specified IAM user.

        An IAM user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the operation returns an empty list.

        " + "documentation":"

        Lists the names of the inline policies embedded in the specified IAM user.

        An IAM user can also have managed policies attached to it. To list the managed policies that are attached to a user, use ListAttachedUserPolicies. For more information about policies, see Managed policies and inline policies in the IAM User Guide.

        You can paginate the results using the MaxItems and Marker parameters. If there are no inline policies embedded with the specified user, the operation returns an empty list.

        " }, "ListUserTags":{ "name":"ListUserTags", @@ -1960,7 +1960,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

        IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

        • PermissionsBoundary

        • Tags

        To view all of the information for a user, see GetUser.

        You can paginate the results using the MaxItems and Marker parameters.

        " + "documentation":"

        Lists the IAM users that have the specified path prefix. If no path prefix is specified, the operation returns all users in the Amazon Web Services account. If there are none, the operation returns an empty list.

        IAM resource-listing operations return a subset of the available attributes for the resource. This operation does not return the following attributes, even though they are an attribute of the returned object:

        • PermissionsBoundary

        • Tags

        To view all of the information for a user, see GetUser.

        You can paginate the results using the MaxItems and Marker parameters.

        " }, "ListVirtualMFADevices":{ "name":"ListVirtualMFADevices", @@ -1973,7 +1973,7 @@ "shape":"ListVirtualMFADevicesResponse", "resultWrapper":"ListVirtualMFADevicesResult" }, - "documentation":"

        Lists the virtual MFA devices defined in the Amazon Web Services account by assignment status. If you do not specify an assignment status, the operation returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view tag information for a virtual MFA device, see ListMFADeviceTags.

        You can paginate the results using the MaxItems and Marker parameters.

        " + "documentation":"

        Lists the virtual MFA devices defined in the Amazon Web Services account by assignment status. If you do not specify an assignment status, the operation returns a list of all virtual MFA devices. Assignment status can be Assigned, Unassigned, or Any.

        IAM resource-listing operations return a subset of the available attributes for the resource. For example, this operation does not return tags, even though they are an attribute of the returned object. To view tag information for a virtual MFA device, see ListMFADeviceTags.

        You can paginate the results using the MaxItems and Marker parameters.

        " }, "PutGroupPolicy":{ "name":"PutGroupPolicy", @@ -2140,7 +2140,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Sets the specified version of the specified policy as the policy's default (operative) version.

        This operation affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy.

        For information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Sets the specified version of the specified policy as the policy's default (operative) version.

        This operation affects all users, groups, and roles that the policy is attached to. To list the users, groups, and roles that the policy is attached to, use ListEntitiesForPolicy.

        For information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "SetSecurityTokenServicePreferences":{ "name":"SetSecurityTokenServicePreferences", @@ -2152,7 +2152,7 @@ "errors":[ {"shape":"ServiceFailureException"} ], - "documentation":"

        Sets the specified version of the global endpoint token as the token version used for the Amazon Web Services account.

        By default, Security Token Service (STS) is available as a global service, and all STS requests go to a single endpoint at https://sts.amazonaws.com. Amazon Web Services recommends using Regional STS endpoints to reduce latency, build in redundancy, and increase session token availability. For information about Regional endpoints for STS, see Security Token Service endpoints and quotas in the Amazon Web Services General Reference.

        If you make an STS call to the global endpoint, the resulting session tokens might be valid in some Regions but not others. It depends on the version that is set in this operation. Version 1 tokens are valid only in Amazon Web Services Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens. For information, see Activating and deactivating STS in an Amazon Web Services Region in the IAM User Guide.

        To view the current session token version, see the GlobalEndpointTokenVersion entry in the response of the GetAccountSummary operation.

        " + "documentation":"

        Sets the specified version of the global endpoint token as the token version used for the Amazon Web Services account.

        By default, Security Token Service (STS) is available as a global service, and all STS requests go to a single endpoint at https://sts.amazonaws.com. Amazon Web Services recommends using Regional STS endpoints to reduce latency, build in redundancy, and increase session token availability. For information about Regional endpoints for STS, see Security Token Service endpoints and quotas in the Amazon Web Services General Reference.

        If you make an STS call to the global endpoint, the resulting session tokens might be valid in some Regions but not others. It depends on the version that is set in this operation. Version 1 tokens are valid only in Amazon Web Services Regions that are available by default. These tokens do not work in manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in all Regions. However, version 2 tokens are longer and might affect systems where you temporarily store tokens. For information, see Activating and deactivating STS in an Amazon Web Services Region in the IAM User Guide.

        To view the current session token version, see the GlobalEndpointTokenVersion entry in the response of the GetAccountSummary operation.

        " }, "SimulateCustomPolicy":{ "name":"SimulateCustomPolicy", @@ -2169,7 +2169,7 @@ {"shape":"InvalidInputException"}, {"shape":"PolicyEvaluationException"} ], - "documentation":"

        Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and Amazon Web Services resources to determine the policies' effective permissions. The policies are provided as strings.

        The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations. You can simulate resources that don't exist in your account.

        If you want to simulate existing policies that are attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

        Context keys are variables that are maintained by Amazon Web Services and its services and which provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

        If the output is long, you can use MaxItems and Marker parameters to paginate the results.

        The IAM policy simulator evaluates statements in the identity-based policy and the inputs that you provide during simulation. The policy simulator results can differ from your live Amazon Web Services environment. We recommend that you check your policies against your live Amazon Web Services environment after testing using the policy simulator to confirm that you have the desired results. For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.

        " + "documentation":"

        Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API operations and Amazon Web Services resources to determine the policies' effective permissions. The policies are provided as strings.

        The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations. You can simulate resources that don't exist in your account.

        If you want to simulate existing policies that are attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.

        Context keys are variables that are maintained by Amazon Web Services and its services and which provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.

        If the output is long, you can use MaxItems and Marker parameters to paginate the results.

        The IAM policy simulator evaluates statements in the identity-based policy and the inputs that you provide during simulation. The policy simulator results can differ from your live Amazon Web Services environment. We recommend that you check your policies against your live Amazon Web Services environment after testing using the policy simulator to confirm that you have the desired results. For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.

        " }, "SimulatePrincipalPolicy":{ "name":"SimulatePrincipalPolicy", @@ -2187,7 +2187,7 @@ {"shape":"InvalidInputException"}, {"shape":"PolicyEvaluationException"} ], - "documentation":"

        Simulate how a set of IAM policies attached to an IAM entity works with a list of API operations and Amazon Web Services resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to. You can simulate resources that don't exist in your account.

        You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

        You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation for IAM users only.

        The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

        Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use SimulateCustomPolicy instead.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

        If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

        The IAM policy simulator evaluates statements in the identity-based policy and the inputs that you provide during simulation. The policy simulator results can differ from your live Amazon Web Services environment. We recommend that you check your policies against your live Amazon Web Services environment after testing using the policy simulator to confirm that you have the desired results. For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.

        " + "documentation":"

        Simulate how a set of IAM policies attached to an IAM entity works with a list of API operations and Amazon Web Services resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to. You can simulate resources that don't exist in your account.

        You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead.

        You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation for IAM users only.

        The simulation does not perform the API operations; it only checks the authorization to determine if the simulated policies allow or deny the operations.

        Note: This operation discloses information about the permissions granted to other users. If you do not want users to see other user's permissions, then consider allowing them to use SimulateCustomPolicy instead.

        Context keys are variables maintained by Amazon Web Services and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy.

        If the output is long, you can use the MaxItems and Marker parameters to paginate the results.

        The IAM policy simulator evaluates statements in the identity-based policy and the inputs that you provide during simulation. The policy simulator results can differ from your live Amazon Web Services environment. We recommend that you check your policies against your live Amazon Web Services environment after testing using the policy simulator to confirm that you have the desired results. For more information about using the policy simulator, see Testing IAM policies with the IAM policy simulator in the IAM User Guide.

        " }, "TagInstanceProfile":{ "name":"TagInstanceProfile", @@ -2445,7 +2445,8 @@ "errors":[ {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"InvalidInputException"} ], "documentation":"

        Changes the status of the specified access key from Active to Inactive, or vice versa. This operation can be used to disable a user's key as part of a key rotation workflow.

        If the UserName is not specified, the user name is determined implicitly based on the Amazon Web Services access key ID used to sign the request. If a temporary access key is used, then UserName is required. If a long-term key is assigned to the user, then UserName is not required. This operation works for access keys under the Amazon Web Services account. Consequently, you can use this operation to manage Amazon Web Services account root user credentials even if the Amazon Web Services account has no associated users.

        For information about rotating keys, see Managing keys and certificates in the IAM User Guide.

        " }, @@ -2509,7 +2510,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Changes the password for the specified IAM user. You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. Use ChangePassword to change your own password in the My Security Credentials page in the Amazon Web Services Management Console.

        For more information about modifying passwords, see Managing passwords in the IAM User Guide.

        " + "documentation":"

        Changes the password for the specified IAM user. You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. Use ChangePassword to change your own password in the My Security Credentials page in the Amazon Web Services Management Console.

        For more information about modifying passwords, see Managing passwords in the IAM User Guide.

        " }, "UpdateOpenIDConnectProviderThumbprint":{ "name":"UpdateOpenIDConnectProviderThumbprint", @@ -2559,7 +2560,7 @@ {"shape":"UnmodifiableEntityException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

        Use UpdateRole instead.

        Modifies only the description of a role. This operation performs the same function as the Description parameter in the UpdateRole operation.

        " + "documentation":"

        Use UpdateRole instead.

        Modifies only the description of a role. This operation performs the same function as the Description parameter in the UpdateRole operation.

        " }, "UpdateSAMLProvider":{ "name":"UpdateSAMLProvider", @@ -2588,7 +2589,8 @@ }, "input":{"shape":"UpdateSSHPublicKeyRequest"}, "errors":[ - {"shape":"NoSuchEntityException"} + {"shape":"NoSuchEntityException"}, + {"shape":"InvalidInputException"} ], "documentation":"

        Sets the status of an IAM user's SSH public key to active or inactive. SSH public keys that are inactive cannot be used for authentication. This operation can be used to disable a user's SSH public key as part of a key rotation workflow.

        The SSH public key affected by this operation is used only for authenticating the associated IAM user to an CodeCommit repository. For more information about using SSH keys to authenticate to an CodeCommit repository, see Set up CodeCommit for SSH connections in the CodeCommit User Guide.

        " }, @@ -2629,7 +2631,8 @@ "errors":[ {"shape":"NoSuchEntityException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceFailureException"} + {"shape":"ServiceFailureException"}, + {"shape":"InvalidInputException"} ], "documentation":"

        Changes the status of the specified user signing certificate from active to disabled, or vice versa. This operation can be used to disable an IAM user's signing certificate as part of a certificate rotation workflow.

        If the UserName field is not specified, the user name is determined implicitly based on the Amazon Web Services access key ID used to sign the request. This operation works for access keys under the Amazon Web Services account. Consequently, you can use this operation to manage Amazon Web Services account root user credentials even if the Amazon Web Services account has no associated users.

        " }, @@ -2756,7 +2759,7 @@ "documentation":"

        The number of accounts with authenticated principals (root user, IAM users, and IAM roles) that attempted to access the service in the tracking period.

        " } }, - "documentation":"

        An object that contains details about when a principal in the reported Organizations entity last attempted to access an Amazon Web Services service. A principal can be an IAM user, an IAM role, or the Amazon Web Services account root user within the reported Organizations entity.

        This data type is a response element in the GetOrganizationsAccessReport operation.

        " + "documentation":"

        An object that contains details about when a principal in the reported Organizations entity last attempted to access an Amazon Web Services service. A principal can be an IAM user, an IAM role, or the Amazon Web Services account root user within the reported Organizations entity.

        This data type is a response element in the GetOrganizationsAccessReport operation.

        " }, "AccessDetails":{ "type":"list", @@ -2792,7 +2795,7 @@ "documentation":"

        The date when the access key was created.

        " } }, - "documentation":"

        Contains information about an Amazon Web Services access key.

        This data type is used as a response element in the CreateAccessKey and ListAccessKeys operations.

        The SecretAccessKey value is returned only in response to CreateAccessKey. You can get a secret access key only when you first create an access key; you cannot recover the secret access key later. If you lose a secret access key, you must create a new access key.

        " + "documentation":"

        Contains information about an Amazon Web Services access key.

        This data type is used as a response element in the CreateAccessKey and ListAccessKeys operations.

        The SecretAccessKey value is returned only in response to CreateAccessKey. You can get a secret access key only when you first create an access key; you cannot recover the secret access key later. If you lose a secret access key, you must create a new access key.

        " }, "AccessKeyLastUsed":{ "type":"structure", @@ -2814,7 +2817,7 @@ "documentation":"

        The Amazon Web Services Region where this access key was most recently used. The value for this field is \"N/A\" in the following situations:

        • The user does not have an access key.

        • An access key exists but has not been used since IAM began tracking this information.

        • There is no sign-in data associated with the user.

        For more information about Amazon Web Services Regions, see Regions and endpoints in the Amazon Web Services General Reference.

        " } }, - "documentation":"

        Contains information about the last time an Amazon Web Services access key was used since IAM began tracking this information on April 22, 2015.

        This data type is used as a response element in the GetAccessKeyLastUsed operation.

        " + "documentation":"

        Contains information about the last time an Amazon Web Services access key was used since IAM began tracking this information on April 22, 2015.

        This data type is used as a response element in the GetAccessKeyLastUsed operation.

        " }, "AccessKeyMetadata":{ "type":"structure", @@ -2836,12 +2839,11 @@ "documentation":"

        The date when the access key was created.

        " } }, - "documentation":"

        Contains information about an Amazon Web Services access key, without its secret key.

        This data type is used as a response element in the ListAccessKeys operation.

        " + "documentation":"

        Contains information about an Amazon Web Services access key, without its secret key.

        This data type is used as a response element in the ListAccessKeys operation.

        " }, "AccountNotManagementOrDelegatedAdministratorException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request was rejected because the account making the request is not the management account or delegated administrator account for centralized root access.

        ", "exception":true }, @@ -2863,7 +2865,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider resource to add the client ID to. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider resource to add the client ID to. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        " }, "ClientID":{ "shape":"clientIDType", @@ -2983,7 +2985,7 @@ }, "PolicyArn":{"shape":"arnType"} }, - "documentation":"

        Contains information about an attached policy.

        An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about an attached policy.

        An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "BootstrapDatum":{ "type":"blob", @@ -2991,8 +2993,7 @@ }, "CallerIsNotManagementAccountException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request was rejected because the account making the request is not the management account for the organization.

        ", "exception":true }, @@ -3061,7 +3062,7 @@ "documentation":"

        The data type of the value (or values) specified in the ContextKeyValues parameter.

        " } }, - "documentation":"

        Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

        This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy.

        " + "documentation":"

        Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.

        This data type is used as an input parameter to SimulateCustomPolicy and SimulatePrincipalPolicy.

        " }, "ContextEntryListType":{ "type":"list", @@ -3116,7 +3117,7 @@ "documentation":"

        A structure with details about the access key.

        " } }, - "documentation":"

        Contains the response to a successful CreateAccessKey request.

        " + "documentation":"

        Contains the response to a successful CreateAccessKey request.

        " }, "CreateAccountAliasRequest":{ "type":"structure", @@ -3151,7 +3152,7 @@ "documentation":"

        A structure containing details about the new group.

        " } }, - "documentation":"

        Contains the response to a successful CreateGroup request.

        " + "documentation":"

        Contains the response to a successful CreateGroup request.

        " }, "CreateInstanceProfileRequest":{ "type":"structure", @@ -3180,7 +3181,7 @@ "documentation":"

        A structure containing details about the new instance profile.

        " } }, - "documentation":"

        Contains the response to a successful CreateInstanceProfile request.

        " + "documentation":"

        Contains the response to a successful CreateInstanceProfile request.

        " }, "CreateLoginProfileRequest":{ "type":"structure", @@ -3208,7 +3209,7 @@ "documentation":"

        A structure containing the user name and password create date.

        " } }, - "documentation":"

        Contains the response to a successful CreateLoginProfile request.

        " + "documentation":"

        Contains the response to a successful CreateLoginProfile request.

        " }, "CreateOpenIDConnectProviderRequest":{ "type":"structure", @@ -3237,14 +3238,14 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that is created. For more information, see OpenIDConnectProviderListEntry.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that is created. For more information, see OpenIDConnectProviderListEntry.

        " }, "Tags":{ "shape":"tagListType", "documentation":"

        A list of tags that are attached to the new IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains the response to a successful CreateOpenIDConnectProvider request.

        " + "documentation":"

        Contains the response to a successful CreateOpenIDConnectProvider request.

        " }, "CreatePolicyRequest":{ "type":"structure", @@ -3283,7 +3284,7 @@ "documentation":"

        A structure containing details about the new policy.

        " } }, - "documentation":"

        Contains the response to a successful CreatePolicy request.

        " + "documentation":"

        Contains the response to a successful CreatePolicy request.

        " }, "CreatePolicyVersionRequest":{ "type":"structure", @@ -3314,7 +3315,7 @@ "documentation":"

        A structure containing details about the new policy version.

        " } }, - "documentation":"

        Contains the response to a successful CreatePolicyVersion request.

        " + "documentation":"

        Contains the response to a successful CreatePolicyVersion request.

        " }, "CreateRoleRequest":{ "type":"structure", @@ -3362,7 +3363,7 @@ "documentation":"

        A structure containing details about the new role.

        " } }, - "documentation":"

        Contains the response to a successful CreateRole request.

        " + "documentation":"

        Contains the response to a successful CreateRole request.

        " }, "CreateSAMLProviderRequest":{ "type":"structure", @@ -3405,7 +3406,7 @@ "documentation":"

        A list of tags that are attached to the new IAM SAML provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains the response to a successful CreateSAMLProvider request.

        " + "documentation":"

        Contains the response to a successful CreateSAMLProvider request.

        " }, "CreateServiceLinkedRoleRequest":{ "type":"structure", @@ -3430,7 +3431,7 @@ "members":{ "Role":{ "shape":"Role", - "documentation":"

        A Role object that contains details about the newly created role.

        " + "documentation":"

        A Role object that contains details about the newly created role.

        " } } }, @@ -3448,6 +3449,10 @@ "ServiceName":{ "shape":"serviceName", "documentation":"

        The name of the Amazon Web Services service that is to be associated with the credentials. The service you specify here is the only service that can be accessed using these credentials.

        " + }, + "CredentialAgeDays":{ + "shape":"credentialAgeDays", + "documentation":"

        The number of days until the service specific credential expires. This field is only valid for Bedrock API keys and must be a positive integer. When not specified, the credential will not expire.

        " } } }, @@ -3456,7 +3461,7 @@ "members":{ "ServiceSpecificCredential":{ "shape":"ServiceSpecificCredential", - "documentation":"

        A structure that contains information about the newly created service-specific credential.

        This is the only time that the password for this credential set is available. It cannot be recovered later. Instead, you must reset the password with ResetServiceSpecificCredential.

        " + "documentation":"

        A structure that contains information about the newly created service-specific credential.

        This is the only time that the password for this credential set is available. It cannot be recovered later. Instead, you must reset the password with ResetServiceSpecificCredential.

        " } } }, @@ -3490,7 +3495,7 @@ "documentation":"

        A structure with details about the new IAM user.

        " } }, - "documentation":"

        Contains the response to a successful CreateUser request.

        " + "documentation":"

        Contains the response to a successful CreateUser request.

        " }, "CreateVirtualMFADeviceRequest":{ "type":"structure", @@ -3519,14 +3524,14 @@ "documentation":"

        A structure containing details about the new virtual MFA device.

        " } }, - "documentation":"

        Contains the response to a successful CreateVirtualMFADevice request.

        " + "documentation":"

        Contains the response to a successful CreateVirtualMFADevice request.

        " }, "CredentialReportExpiredException":{ "type":"structure", "members":{ "message":{"shape":"credentialReportExpiredExceptionMessage"} }, - "documentation":"

        The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting credential reports in the IAM User Guide.

        ", + "documentation":"

        The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport. For more information about credential report expiration, see Getting credential reports in the IAM User Guide.

        ", "error":{ "code":"ReportExpired", "httpStatusCode":410, @@ -3539,7 +3544,7 @@ "members":{ "message":{"shape":"credentialReportNotPresentExceptionMessage"} }, - "documentation":"

        The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.

        ", + "documentation":"

        The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.

        ", "error":{ "code":"ReportNotPresent", "httpStatusCode":410, @@ -3663,7 +3668,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource object to delete. You can get a list of OpenID Connect provider resource ARNs by using the ListOpenIDConnectProviders operation.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource object to delete. You can get a list of OpenID Connect provider resource ARNs by using the ListOpenIDConnectProviders operation.

        " } } }, @@ -3798,7 +3803,7 @@ }, "ServiceSpecificCredentialId":{ "shape":"serviceSpecificCredentialId", - "documentation":"

        The unique identifier of the service-specific credential. You can get this value by calling ListServiceSpecificCredentials.

        This parameter allows (through its regex pattern) a string of characters that can consist of any upper or lowercased letter or digit.

        " + "documentation":"

        The unique identifier of the service-specific credential. You can get this value by calling ListServiceSpecificCredentials.

        This parameter allows (through its regex pattern) a string of characters that can consist of any upper or lowercased letter or digit.

        " } } }, @@ -3875,7 +3880,7 @@ "documentation":"

        A list of objects that contains details about the service-linked role deletion failure, if that information is returned by the service. If the service-linked role has active sessions or if any resources that were used by the role have not been deleted from the linked service, the role can't be deleted. This parameter includes a list of the resources that are associated with the role and the Region in which the resources are being used.

        " } }, - "documentation":"

        The reason that the service-linked role deletion failed.

        This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus operation.

        " + "documentation":"

        The reason that the service-linked role deletion failed.

        This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus operation.

        " }, "DeletionTaskIdType":{ "type":"string", @@ -3944,8 +3949,7 @@ }, "DisableOrganizationsRootCredentialsManagementRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisableOrganizationsRootCredentialsManagementResponse":{ "type":"structure", @@ -3962,8 +3966,7 @@ }, "DisableOrganizationsRootSessionsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisableOrganizationsRootSessionsResponse":{ "type":"structure", @@ -4033,8 +4036,7 @@ }, "EnableOrganizationsRootCredentialsManagementRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "EnableOrganizationsRootCredentialsManagementResponse":{ "type":"structure", @@ -4051,8 +4053,7 @@ }, "EnableOrganizationsRootSessionsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "EnableOrganizationsRootSessionsResponse":{ "type":"structure", @@ -4093,7 +4094,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the authenticated entity last attempted to access Amazon Web Services. Amazon Web Services does not report unauthenticated requests.

        This field is null if no IAM entities attempted to access the service within the tracking period.

        " } }, - "documentation":"

        An object that contains details about when the IAM entities (users or roles) were last used in an attempt to access the specified Amazon Web Services service.

        This data type is a response element in the GetServiceLastAccessedDetailsWithEntities operation.

        " + "documentation":"

        An object that contains details about when the IAM entities (users or roles) were last used in an attempt to access the specified Amazon Web Services service.

        This data type is a response element in the GetServiceLastAccessedDetailsWithEntities operation.

        " }, "EntityInfo":{ "type":"structure", @@ -4122,7 +4123,7 @@ "documentation":"

        The path to the entity (user or role). For more information about paths, see IAM identifiers in the IAM User Guide.

        " } }, - "documentation":"

        Contains details about the specified entity (user or role).

        This data type is an element of the EntityDetails object.

        " + "documentation":"

        Contains details about the specified entity (user or role).

        This data type is an element of the EntityDetails object.

        " }, "EntityTemporarilyUnmodifiableException":{ "type":"structure", @@ -4163,7 +4164,7 @@ "documentation":"

        The error code associated with the operation failure.

        " } }, - "documentation":"

        Contains information about the reason that the operation failed.

        This data type is used as a response element in the GetOrganizationsAccessReport, GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities operations.

        " + "documentation":"

        Contains information about the reason that the operation failed.

        This data type is used as a response element in the GetOrganizationsAccessReport, GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities operations.

        " }, "EvalDecisionDetailsType":{ "type":"map", @@ -4200,7 +4201,7 @@ }, "MissingContextValues":{ "shape":"ContextKeyNamesResultListType", - "documentation":"

        A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when the resource in a simulation is \"*\", either explicitly, or when the ResourceArns parameter blank. If you include a list of resources, then any missing context values are instead included under the ResourceSpecificResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

        " + "documentation":"

        A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when the resource in a simulation is \"*\", either explicitly, or when the ResourceArns parameter blank. If you include a list of resources, then any missing context values are instead included under the ResourceSpecificResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

        " }, "OrganizationsDecisionDetail":{ "shape":"OrganizationsDecisionDetail", @@ -4219,7 +4220,7 @@ "documentation":"

        The individual results of the simulation of the API operation specified in EvalActionName on each resource.

        " } }, - "documentation":"

        Contains the results of a simulation.

        This data type is used by the return parameter of SimulateCustomPolicy and SimulatePrincipalPolicy .

        " + "documentation":"

        Contains the results of a simulation.

        This data type is used by the return parameter of SimulateCustomPolicy and SimulatePrincipalPolicy .

        " }, "EvaluationResultsListType":{ "type":"list", @@ -4248,7 +4249,7 @@ "documentation":"

        Information about the credential report.

        " } }, - "documentation":"

        Contains the response to a successful GenerateCredentialReport request.

        " + "documentation":"

        Contains the response to a successful GenerateCredentialReport request.

        " }, "GenerateOrganizationsAccessReportRequest":{ "type":"structure", @@ -4269,7 +4270,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

        The job identifier that you can use in the GetOrganizationsAccessReport operation.

        " + "documentation":"

        The job identifier that you can use in the GetOrganizationsAccessReport operation.

        " } } }, @@ -4292,7 +4293,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

        The JobId that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities operations. The JobId returned by GenerateServiceLastAccessedDetail must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetail.

        " + "documentation":"

        The JobId that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities operations. The JobId returned by GenerateServiceLastAccessedDetail must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetail.

        " } } }, @@ -4318,7 +4319,7 @@ "documentation":"

        Contains information about the last time the access key was used.

        " } }, - "documentation":"

        Contains the response to a successful GetAccessKeyLastUsed request. It is also returned as a member of the AccessKeyMetaData structure returned by the ListAccessKeys action.

        " + "documentation":"

        Contains the response to a successful GetAccessKeyLastUsed request. It is also returned as a member of the AccessKeyMetaData structure returned by the ListAccessKeys action.

        " }, "GetAccountAuthorizationDetailsRequest":{ "type":"structure", @@ -4365,7 +4366,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful GetAccountAuthorizationDetails request.

        " + "documentation":"

        Contains the response to a successful GetAccountAuthorizationDetails request.

        " }, "GetAccountPasswordPolicyResponse":{ "type":"structure", @@ -4376,7 +4377,7 @@ "documentation":"

        A structure that contains details about the account's password policy.

        " } }, - "documentation":"

        Contains the response to a successful GetAccountPasswordPolicy request.

        " + "documentation":"

        Contains the response to a successful GetAccountPasswordPolicy request.

        " }, "GetAccountSummaryResponse":{ "type":"structure", @@ -4386,7 +4387,7 @@ "documentation":"

        A set of key–value pairs containing information about IAM entity usage and IAM quotas.

        " } }, - "documentation":"

        Contains the response to a successful GetAccountSummary request.

        " + "documentation":"

        Contains the response to a successful GetAccountSummary request.

        " }, "GetContextKeysForCustomPolicyRequest":{ "type":"structure", @@ -4406,7 +4407,7 @@ "documentation":"

        The list of context keys that are referenced in the input policies.

        " } }, - "documentation":"

        Contains the response to a successful GetContextKeysForPrincipalPolicy or GetContextKeysForCustomPolicy request.

        " + "documentation":"

        Contains the response to a successful GetContextKeysForPrincipalPolicy or GetContextKeysForCustomPolicy request.

        " }, "GetContextKeysForPrincipalPolicyRequest":{ "type":"structure", @@ -4438,7 +4439,7 @@ "documentation":"

        The date and time when the credential report was created, in ISO 8601 date-time format.

        " } }, - "documentation":"

        Contains the response to a successful GetCredentialReport request.

        " + "documentation":"

        Contains the response to a successful GetCredentialReport request.

        " }, "GetGroupPolicyRequest":{ "type":"structure", @@ -4478,7 +4479,7 @@ "documentation":"

        The policy document.

        IAM stores policies in JSON format. However, resources that were created using CloudFormation templates can be formatted in YAML. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

        " } }, - "documentation":"

        Contains the response to a successful GetGroupPolicy request.

        " + "documentation":"

        Contains the response to a successful GetGroupPolicy request.

        " }, "GetGroupRequest":{ "type":"structure", @@ -4522,7 +4523,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful GetGroup request.

        " + "documentation":"

        Contains the response to a successful GetGroup request.

        " }, "GetInstanceProfileRequest":{ "type":"structure", @@ -4543,7 +4544,7 @@ "documentation":"

        A structure containing details about the instance profile.

        " } }, - "documentation":"

        Contains the response to a successful GetInstanceProfile request.

        " + "documentation":"

        Contains the response to a successful GetInstanceProfile request.

        " }, "GetLoginProfileRequest":{ "type":"structure", @@ -4563,7 +4564,7 @@ "documentation":"

        A structure containing the user name and the profile creation date for the user.

        " } }, - "documentation":"

        Contains the response to a successful GetLoginProfile request.

        " + "documentation":"

        Contains the response to a successful GetLoginProfile request.

        " }, "GetMFADeviceRequest":{ "type":"structure", @@ -4607,7 +4608,7 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM to get information for. You can get a list of OIDC provider resource ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM to get information for. You can get a list of OIDC provider resource ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " } } }, @@ -4616,15 +4617,15 @@ "members":{ "Url":{ "shape":"OpenIDConnectProviderUrlType", - "documentation":"

        The URL that the IAM OIDC provider resource object is associated with. For more information, see CreateOpenIDConnectProvider.

        " + "documentation":"

        The URL that the IAM OIDC provider resource object is associated with. For more information, see CreateOpenIDConnectProvider.

        " }, "ClientIDList":{ "shape":"clientIDListType", - "documentation":"

        A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

        " + "documentation":"

        A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

        " }, "ThumbprintList":{ "shape":"thumbprintListType", - "documentation":"

        A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

        " + "documentation":"

        A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider.

        " }, "CreateDate":{ "shape":"dateType", @@ -4635,7 +4636,7 @@ "documentation":"

        A list of tags that are attached to the specified IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains the response to a successful GetOpenIDConnectProvider request.

        " + "documentation":"

        Contains the response to a successful GetOpenIDConnectProvider request.

        " }, "GetOrganizationsAccessReportRequest":{ "type":"structure", @@ -4643,7 +4644,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

        The identifier of the request generated by the GenerateOrganizationsAccessReport operation.

        " + "documentation":"

        The identifier of the request generated by the GenerateOrganizationsAccessReport operation.

        " }, "MaxItems":{ "shape":"maxItemsType", @@ -4719,7 +4720,7 @@ "documentation":"

        A structure containing details about the policy.

        " } }, - "documentation":"

        Contains the response to a successful GetPolicy request.

        " + "documentation":"

        Contains the response to a successful GetPolicy request.

        " }, "GetPolicyVersionRequest":{ "type":"structure", @@ -4746,7 +4747,7 @@ "documentation":"

        A structure containing details about the policy version.

        " } }, - "documentation":"

        Contains the response to a successful GetPolicyVersion request.

        " + "documentation":"

        Contains the response to a successful GetPolicyVersion request.

        " }, "GetRolePolicyRequest":{ "type":"structure", @@ -4786,7 +4787,7 @@ "documentation":"

        The policy document.

        IAM stores policies in JSON format. However, resources that were created using CloudFormation templates can be formatted in YAML. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

        " } }, - "documentation":"

        Contains the response to a successful GetRolePolicy request.

        " + "documentation":"

        Contains the response to a successful GetRolePolicy request.

        " }, "GetRoleRequest":{ "type":"structure", @@ -4807,7 +4808,7 @@ "documentation":"

        A structure containing details about the IAM role.

        " } }, - "documentation":"

        Contains the response to a successful GetRole request.

        " + "documentation":"

        Contains the response to a successful GetRole request.

        " }, "GetSAMLProviderRequest":{ "type":"structure", @@ -4851,7 +4852,7 @@ "documentation":"

        The private key metadata for the SAML provider.

        " } }, - "documentation":"

        Contains the response to a successful GetSAMLProvider request.

        " + "documentation":"

        Contains the response to a successful GetSAMLProvider request.

        " }, "GetSSHPublicKeyRequest":{ "type":"structure", @@ -4883,7 +4884,7 @@ "documentation":"

        A structure containing details about the SSH public key.

        " } }, - "documentation":"

        Contains the response to a successful GetSSHPublicKey request.

        " + "documentation":"

        Contains the response to a successful GetSSHPublicKey request.

        " }, "GetServerCertificateRequest":{ "type":"structure", @@ -4904,7 +4905,7 @@ "documentation":"

        A structure containing details about the server certificate.

        " } }, - "documentation":"

        Contains the response to a successful GetServerCertificate request.

        " + "documentation":"

        Contains the response to a successful GetServerCertificate request.

        " }, "GetServiceLastAccessedDetailsRequest":{ "type":"structure", @@ -4912,7 +4913,7 @@ "members":{ "JobId":{ "shape":"jobIDType", - "documentation":"

        The ID of the request generated by the GenerateServiceLastAccessedDetails operation. The JobId returned by GenerateServiceLastAccessedDetail must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetail.

        " + "documentation":"

        The ID of the request generated by the GenerateServiceLastAccessedDetails operation. The JobId returned by GenerateServiceLastAccessedDetail must be used by the same role within a session, or by the same user when used to call GetServiceLastAccessedDetail.

        " }, "MaxItems":{ "shape":"maxItemsType", @@ -5037,7 +5038,7 @@ "members":{ "DeletionTaskId":{ "shape":"DeletionTaskIdType", - "documentation":"

        The deletion task identifier. This identifier is returned by the DeleteServiceLinkedRole operation in the format task/aws-service-role/<service-principal-name>/<role-name>/<task-uuid>.

        " + "documentation":"

        The deletion task identifier. This identifier is returned by the DeleteServiceLinkedRole operation in the format task/aws-service-role/<service-principal-name>/<role-name>/<task-uuid>.

        " } } }, @@ -5093,7 +5094,7 @@ "documentation":"

        The policy document.

        IAM stores policies in JSON format. However, resources that were created using CloudFormation templates can be formatted in YAML. CloudFormation always converts a YAML policy to JSON format before submitting it to IAM.

        " } }, - "documentation":"

        Contains the response to a successful GetUserPolicy request.

        " + "documentation":"

        Contains the response to a successful GetUserPolicy request.

        " }, "GetUserRequest":{ "type":"structure", @@ -5113,7 +5114,7 @@ "documentation":"

        A structure containing details about the IAM user.

        Due to a service issue, password last used data does not include password use from May 3, 2018 22:50 PDT to May 23, 2018 14:08 PDT. This affects last sign-in dates shown in the IAM console and password last used dates in the IAM credential report, and returned by this operation. If users signed in during the affected time, the password last used date that is returned is the date the user last signed in before May 3, 2018. For users that signed in after May 23, 2018 14:08 PDT, the returned password last used date is accurate.

        You can use password last used information to identify unused credentials for deletion. For example, you might delete users who did not sign in to Amazon Web Services in the last 90 days. In cases like this, we recommend that you adjust your evaluation window to include dates after May 23, 2018. Alternatively, if your users use access keys to access Amazon Web Services programmatically you can refer to access key last used information because it is accurate for all dates.

        " } }, - "documentation":"

        Contains the response to a successful GetUser request.

        " + "documentation":"

        Contains the response to a successful GetUser request.

        " }, "Group":{ "type":"structure", @@ -5146,7 +5147,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the group was created.

        " } }, - "documentation":"

        Contains information about an IAM group entity.

        This data type is used as a response element in the following operations:

        " + "documentation":"

        Contains information about an IAM group entity.

        This data type is used as a response element in the following operations:

        " }, "GroupDetail":{ "type":"structure", @@ -5177,7 +5178,7 @@ "documentation":"

        A list of the managed policies attached to the group.

        " } }, - "documentation":"

        Contains information about an IAM group, including all of the group's policies.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " + "documentation":"

        Contains information about an IAM group, including all of the group's policies.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " }, "InstanceProfile":{ "type":"structure", @@ -5219,7 +5220,7 @@ "documentation":"

        A list of tags that are attached to the instance profile. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about an instance profile.

        This data type is used as a response element in the following operations:

        " + "documentation":"

        Contains information about an instance profile.

        This data type is used as a response element in the following operations:

        " }, "InvalidAuthenticationCodeException":{ "type":"structure", @@ -5347,7 +5348,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListAccessKeys request.

        " + "documentation":"

        Contains the response to a successful ListAccessKeys request.

        " }, "ListAccountAliasesRequest":{ "type":"structure", @@ -5379,7 +5380,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListAccountAliases request.

        " + "documentation":"

        Contains the response to a successful ListAccountAliases request.

        " }, "ListAttachedGroupPoliciesRequest":{ "type":"structure", @@ -5419,7 +5420,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListAttachedGroupPolicies request.

        " + "documentation":"

        Contains the response to a successful ListAttachedGroupPolicies request.

        " }, "ListAttachedRolePoliciesRequest":{ "type":"structure", @@ -5459,7 +5460,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListAttachedRolePolicies request.

        " + "documentation":"

        Contains the response to a successful ListAttachedRolePolicies request.

        " }, "ListAttachedUserPoliciesRequest":{ "type":"structure", @@ -5499,7 +5500,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListAttachedUserPolicies request.

        " + "documentation":"

        Contains the response to a successful ListAttachedUserPolicies request.

        " }, "ListEntitiesForPolicyRequest":{ "type":"structure", @@ -5555,7 +5556,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListEntitiesForPolicy request.

        " + "documentation":"

        Contains the response to a successful ListEntitiesForPolicy request.

        " }, "ListGroupPoliciesRequest":{ "type":"structure", @@ -5592,7 +5593,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListGroupPolicies request.

        " + "documentation":"

        Contains the response to a successful ListGroupPolicies request.

        " }, "ListGroupsForUserRequest":{ "type":"structure", @@ -5629,7 +5630,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListGroupsForUser request.

        " + "documentation":"

        Contains the response to a successful ListGroupsForUser request.

        " }, "ListGroupsRequest":{ "type":"structure", @@ -5665,7 +5666,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListGroups request.

        " + "documentation":"

        Contains the response to a successful ListGroups request.

        " }, "ListInstanceProfileTagsRequest":{ "type":"structure", @@ -5738,7 +5739,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListInstanceProfilesForRole request.

        " + "documentation":"

        Contains the response to a successful ListInstanceProfilesForRole request.

        " }, "ListInstanceProfilesRequest":{ "type":"structure", @@ -5774,7 +5775,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListInstanceProfiles request.

        " + "documentation":"

        Contains the response to a successful ListInstanceProfiles request.

        " }, "ListMFADeviceTagsRequest":{ "type":"structure", @@ -5846,7 +5847,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListMFADevices request.

        " + "documentation":"

        Contains the response to a successful ListMFADevices request.

        " }, "ListOpenIDConnectProviderTagsRequest":{ "type":"structure", @@ -5886,8 +5887,7 @@ }, "ListOpenIDConnectProvidersRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "ListOpenIDConnectProvidersResponse":{ "type":"structure", @@ -5897,12 +5897,11 @@ "documentation":"

        The list of IAM OIDC provider resource objects defined in the Amazon Web Services account.

        " } }, - "documentation":"

        Contains the response to a successful ListOpenIDConnectProviders request.

        " + "documentation":"

        Contains the response to a successful ListOpenIDConnectProviders request.

        " }, "ListOrganizationsFeaturesRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "ListOrganizationsFeaturesResponse":{ "type":"structure", @@ -5929,7 +5928,7 @@ "documentation":"

        The PoliciesGrantingServiceAccess object that contains details about the policy.

        " } }, - "documentation":"

        Contains details about the permissions policies that are attached to the specified identity (user, group, or role).

        This data type is used as a response element in the ListPoliciesGrantingServiceAccess operation.

        " + "documentation":"

        Contains details about the permissions policies that are attached to the specified identity (user, group, or role).

        This data type is used as a response element in the ListPoliciesGrantingServiceAccess operation.

        " }, "ListPoliciesGrantingServiceAccessRequest":{ "type":"structure", @@ -6015,7 +6014,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListPolicies request.

        " + "documentation":"

        Contains the response to a successful ListPolicies request.

        " }, "ListPolicyTagsRequest":{ "type":"structure", @@ -6087,7 +6086,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListPolicyVersions request.

        " + "documentation":"

        Contains the response to a successful ListPolicyVersions request.

        " }, "ListRolePoliciesRequest":{ "type":"structure", @@ -6124,7 +6123,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListRolePolicies request.

        " + "documentation":"

        Contains the response to a successful ListRolePolicies request.

        " }, "ListRoleTagsRequest":{ "type":"structure", @@ -6196,7 +6195,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListRoles request.

        " + "documentation":"

        Contains the response to a successful ListRoles request.

        " }, "ListSAMLProviderTagsRequest":{ "type":"structure", @@ -6236,8 +6235,7 @@ }, "ListSAMLProvidersRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "ListSAMLProvidersResponse":{ "type":"structure", @@ -6247,7 +6245,7 @@ "documentation":"

        The list of SAML provider resource objects defined in IAM for this Amazon Web Services account.

        " } }, - "documentation":"

        Contains the response to a successful ListSAMLProviders request.

        " + "documentation":"

        Contains the response to a successful ListSAMLProviders request.

        " }, "ListSSHPublicKeysRequest":{ "type":"structure", @@ -6282,7 +6280,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListSSHPublicKeys request.

        " + "documentation":"

        Contains the response to a successful ListSSHPublicKeys request.

        " }, "ListServerCertificateTagsRequest":{ "type":"structure", @@ -6354,7 +6352,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListServerCertificates request.

        " + "documentation":"

        Contains the response to a successful ListServerCertificates request.

        " }, "ListServiceSpecificCredentialsRequest":{ "type":"structure", @@ -6366,6 +6364,18 @@ "ServiceName":{ "shape":"serviceName", "documentation":"

        Filters the returned results to only those for the specified Amazon Web Services service. If not specified, then Amazon Web Services returns service-specific credentials for all services.

        " + }, + "AllUsers":{ + "shape":"allUsers", + "documentation":"

        A flag indicating whether to list service specific credentials for all users. This parameter cannot be specified together with UserName. When true, returns all credentials associated with the specified service.

        " + }, + "Marker":{ + "shape":"markerType", + "documentation":"

        Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker from the response that you received to indicate where the next call should start.

        " + }, + "MaxItems":{ + "shape":"maxItemsType", + "documentation":"

        Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true.

        " } } }, @@ -6375,6 +6385,14 @@ "ServiceSpecificCredentials":{ "shape":"ServiceSpecificCredentialsListType", "documentation":"

        A list of structures that each contain details about a service-specific credential.

        " + }, + "Marker":{ + "shape":"responseMarkerType", + "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " + }, + "IsTruncated":{ + "shape":"booleanType", + "documentation":"

        A flag that indicates whether there are more items to return. If your results were truncated, you can make a subsequent pagination request using the Marker request parameter to retrieve more items.

        " } } }, @@ -6412,7 +6430,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListSigningCertificates request.

        " + "documentation":"

        Contains the response to a successful ListSigningCertificates request.

        " }, "ListUserPoliciesRequest":{ "type":"structure", @@ -6449,7 +6467,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListUserPolicies request.

        " + "documentation":"

        Contains the response to a successful ListUserPolicies request.

        " }, "ListUserTagsRequest":{ "type":"structure", @@ -6521,7 +6539,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListUsers request.

        " + "documentation":"

        Contains the response to a successful ListUsers request.

        " }, "ListVirtualMFADevicesRequest":{ "type":"structure", @@ -6557,7 +6575,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful ListVirtualMFADevices request.

        " + "documentation":"

        Contains the response to a successful ListVirtualMFADevices request.

        " }, "LoginProfile":{ "type":"structure", @@ -6579,7 +6597,7 @@ "documentation":"

        Specifies whether the user is required to set a new password on next sign-in.

        " } }, - "documentation":"

        Contains the user name and password create date for a user.

        This data type is used as a response element in the CreateLoginProfile and GetLoginProfile operations.

        " + "documentation":"

        Contains the user name and password create date for a user.

        This data type is used as a response element in the CreateLoginProfile and GetLoginProfile operations.

        " }, "MFADevice":{ "type":"structure", @@ -6602,7 +6620,7 @@ "documentation":"

        The date when the MFA device was enabled for the user.

        " } }, - "documentation":"

        Contains information about an MFA device.

        This data type is used as a response element in the ListMFADevices operation.

        " + "documentation":"

        Contains information about an MFA device.

        This data type is used as a response element in the ListMFADevices operation.

        " }, "MalformedCertificateException":{ "type":"structure", @@ -6679,7 +6697,7 @@ "documentation":"

        A list containing information about the versions of the policy.

        " } }, - "documentation":"

        Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        For more information about managed policies, see Managed policies and inline policies in the IAM User Guide.

        " }, "ManagedPolicyDetailListType":{ "type":"list", @@ -6736,15 +6754,13 @@ }, "OrganizationNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request was rejected because no organization is associated with your account.

        ", "exception":true }, "OrganizationNotInAllFeaturesModeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request was rejected because your organization does not have All features enabled. For more information, see Available feature sets in the Organizations User Guide.

        ", "exception":true }, @@ -6802,7 +6818,7 @@ "documentation":"

        Specifies whether IAM users are prevented from setting a new password via the Amazon Web Services Management Console after their password has expired. The IAM user cannot access the console until an administrator resets the password. IAM users with iam:ChangePassword permission and active access keys can reset their own expired console password using the CLI or API.

        " } }, - "documentation":"

        Contains information about the account password policy.

        This data type is used as a response element in the GetAccountPasswordPolicy operation.

        " + "documentation":"

        Contains information about the account password policy.

        This data type is used as a response element in the GetAccountPasswordPolicy operation.

        " }, "PasswordPolicyViolationException":{ "type":"structure", @@ -6865,7 +6881,7 @@ }, "Description":{ "shape":"policyDescriptionType", - "documentation":"

        A friendly description of the policy.

        This element is included in the response to the GetPolicy operation. It is not included in the response to the ListPolicies operation.

        " + "documentation":"

        A friendly description of the policy.

        This element is included in the response to the GetPolicy operation. It is not included in the response to the ListPolicies operation.

        " }, "CreateDate":{ "shape":"dateType", @@ -6880,7 +6896,7 @@ "documentation":"

        A list of tags that are attached to the instance profile. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about a managed policy.

        This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a managed policy.

        This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "PolicyDetail":{ "type":"structure", @@ -6894,7 +6910,7 @@ "documentation":"

        The policy document.

        " } }, - "documentation":"

        Contains information about an IAM policy, including the policy document.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " + "documentation":"

        Contains information about an IAM policy, including the policy document.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " }, "PolicyEvaluationDecisionType":{ "type":"string", @@ -6941,7 +6957,7 @@ "documentation":"

        The name of the entity (user or role) to which the inline policy is attached.

        This field is null for managed policies. For more information about these policy types, see Managed policies and inline policies in the IAM User Guide.

        " } }, - "documentation":"

        Contains details about the permissions policies that are attached to the specified identity (user, group, or role).

        This data type is an element of the ListPoliciesGrantingServiceAccessEntry object.

        " + "documentation":"

        Contains details about the permissions policies that are attached to the specified identity (user, group, or role).

        This data type is an element of the ListPoliciesGrantingServiceAccessEntry object.

        " }, "PolicyGroup":{ "type":"structure", @@ -6955,7 +6971,7 @@ "documentation":"

        The stable and unique string identifying the group. For more information about IDs, see IAM identifiers in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about a group that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a group that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "PolicyGroupListType":{ "type":"list", @@ -6987,7 +7003,7 @@ "documentation":"

        The stable and unique string identifying the role. For more information about IDs, see IAM identifiers in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about a role that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a role that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "PolicyRoleListType":{ "type":"list", @@ -7025,7 +7041,7 @@ "documentation":"

        The stable and unique string identifying the user. For more information about IDs, see IAM identifiers in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about a user that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a user that a managed policy is attached to.

        This data type is used as a response element in the ListEntitiesForPolicy operation.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "PolicyUserListType":{ "type":"list", @@ -7036,7 +7052,7 @@ "members":{ "Document":{ "shape":"policyDocumentType", - "documentation":"

        The policy document.

        The policy document is returned in the response to the GetPolicyVersion and GetAccountAuthorizationDetails operations. It is not returned in the response to the CreatePolicyVersion or ListPolicyVersions operations.

        The policy document returned in this structure is URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        " + "documentation":"

        The policy document.

        The policy document is returned in the response to the GetPolicyVersion and GetAccountAuthorizationDetails operations. It is not returned in the response to the CreatePolicyVersion or ListPolicyVersions operations.

        The policy document returned in this structure is URL-encoded compliant with RFC 3986. You can use a URL decoding method to convert the policy back to plain JSON text. For example, if you use Java, you can use the decode method of the java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs provide similar functionality.

        " }, "VersionId":{ "shape":"policyVersionIdType", @@ -7051,7 +7067,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the policy version was created.

        " } }, - "documentation":"

        Contains information about a version of a managed policy.

        This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " + "documentation":"

        Contains information about a version of a managed policy.

        This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

        For more information about managed policies, refer to Managed policies and inline policies in the IAM User Guide.

        " }, "Position":{ "type":"structure", @@ -7065,7 +7081,7 @@ "documentation":"

        The column in the line containing the specified position in the document.

        " } }, - "documentation":"

        Contains the row and column of a location of a Statement element in a policy document.

        This data type is used as a member of the Statement type.

        " + "documentation":"

        Contains the row and column of a location of a Statement element in a policy document.

        This data type is used as a member of the Statement type.

        " }, "PutGroupPolicyRequest":{ "type":"structure", @@ -7185,11 +7201,11 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove the client ID from. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " }, "ClientID":{ "shape":"clientIDType", - "documentation":"

        The client ID (also known as audience) to remove from the IAM OIDC provider resource. For more information about client IDs, see CreateOpenIDConnectProvider.

        " + "documentation":"

        The client ID (also known as audience) to remove from the IAM OIDC provider resource. For more information about client IDs, see CreateOpenIDConnectProvider.

        " } } }, @@ -7312,7 +7328,7 @@ }, "MissingContextValues":{ "shape":"ContextKeyNamesResultListType", - "documentation":"

        A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when a list of ARNs is included in the ResourceArns parameter instead of \"*\". If you do not specify individual resources, by setting ResourceArns to \"*\" or by not including the ResourceArns parameter, then any missing context values are instead included under the EvaluationResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

        " + "documentation":"

        A list of context keys that are required by the included input policies but that were not provided by one of the input parameters. This list is used when a list of ARNs is included in the ResourceArns parameter instead of \"*\". If you do not specify individual resources, by setting ResourceArns to \"*\" or by not including the ResourceArns parameter, then any missing context values are instead included under the EvaluationResults section. To discover the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.

        " }, "EvalDecisionDetails":{ "shape":"EvalDecisionDetailsType", @@ -7323,7 +7339,7 @@ "documentation":"

        Contains information about the effect that a permissions boundary has on a policy simulation when that boundary is applied to an IAM entity.

        " } }, - "documentation":"

        Contains the result of the simulation of a single API operation call on a single resource.

        This data type is used by a member of the EvaluationResult data type.

        " + "documentation":"

        Contains the result of the simulation of a single API operation call on a single resource.

        This data type is used by a member of the EvaluationResult data type.

        " }, "ResourceSpecificResultListType":{ "type":"list", @@ -7462,7 +7478,7 @@ "documentation":"

        Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about an IAM role, including all of the role's policies.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " + "documentation":"

        Contains information about an IAM role, including all of the role's policies.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " }, "RoleLastUsed":{ "type":"structure", @@ -7476,7 +7492,7 @@ "documentation":"

        The name of the Amazon Web Services Region in which the role was last used.

        " } }, - "documentation":"

        Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM user Guide.

        This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.

        " + "documentation":"

        Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions where data is tracked in the IAM user Guide.

        This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.

        " }, "RoleUsageListType":{ "type":"list", @@ -7494,7 +7510,7 @@ "documentation":"

        The name of the resource that is using the service-linked role.

        " } }, - "documentation":"

        An object that contains details about how a service-linked role is used, if that information is returned by the service.

        This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus operation.

        " + "documentation":"

        An object that contains details about how a service-linked role is used, if that information is returned by the service.

        This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus operation.

        " }, "SAMLMetadataDocumentType":{ "type":"string", @@ -7513,7 +7529,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the private key was uploaded.

        " } }, - "documentation":"

        Contains the private keys for the SAML provider.

        This data type is used as a response element in the GetSAMLProvider operation.

        " + "documentation":"

        Contains the private keys for the SAML provider.

        This data type is used as a response element in the GetSAMLProvider operation.

        " }, "SAMLProviderListEntry":{ "type":"structure", @@ -7578,7 +7594,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

        " } }, - "documentation":"

        Contains information about an SSH public key.

        This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey operations.

        " + "documentation":"

        Contains information about an SSH public key.

        This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey operations.

        " }, "SSHPublicKeyListType":{ "type":"list", @@ -7610,7 +7626,7 @@ "documentation":"

        The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

        " } }, - "documentation":"

        Contains information about an SSH public key, without the key's body or fingerprint.

        This data type is used as a response element in the ListSSHPublicKeys operation.

        " + "documentation":"

        Contains information about an SSH public key, without the key's body or fingerprint.

        This data type is used as a response element in the ListSSHPublicKeys operation.

        " }, "ServerCertificate":{ "type":"structure", @@ -7636,7 +7652,7 @@ "documentation":"

        A list of tags that are attached to the server certificate. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about a server certificate.

        This data type is used as a response element in the GetServerCertificate operation.

        " + "documentation":"

        Contains information about a server certificate.

        This data type is used as a response element in the GetServerCertificate operation.

        " }, "ServerCertificateMetadata":{ "type":"structure", @@ -7672,12 +7688,11 @@ "documentation":"

        The date on which the certificate is set to expire.

        " } }, - "documentation":"

        Contains information about a server certificate without its certificate body, certificate chain, and private key.

        This data type is used as a response element in the UploadServerCertificate and ListServerCertificates operations.

        " + "documentation":"

        Contains information about a server certificate without its certificate body, certificate chain, and private key.

        This data type is used as a response element in the UploadServerCertificate and ListServerCertificates operations.

        " }, "ServiceAccessNotEnabledException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request was rejected because trusted access is not enabled for IAM in Organizations. For details, see IAM and Organizations in the Organizations User Guide.

        ", "exception":true }, @@ -7726,10 +7741,10 @@ }, "TrackedActionsLastAccessed":{ "shape":"TrackedActionsLastAccessed", - "documentation":"

        An object that contains details about the most recent attempt to access a tracked action within the service.

        This field is null if there no tracked actions or if the principal did not use the tracked actions within the tracking period. This field is also null if the report was generated at the service level and not the action level. For more information, see the Granularity field in GenerateServiceLastAccessedDetails.

        " + "documentation":"

        An object that contains details about the most recent attempt to access a tracked action within the service.

        This field is null if there are no tracked actions or if the principal did not use the tracked actions within the tracking period. This field is also null if the report was generated at the service level and not the action level. For more information, see the Granularity field in GenerateServiceLastAccessedDetails.

        " } }, - "documentation":"

        Contains details about the most recent attempt to access the service.

        This data type is used as a response element in the GetServiceLastAccessedDetails operation.

        " + "documentation":"

        Contains details about the most recent attempt to access the service.

        This data type is used as a response element in the GetServiceLastAccessedDetails operation.

        " }, "ServiceNotSupportedException":{ "type":"structure", @@ -7749,8 +7764,6 @@ "required":[ "CreateDate", "ServiceName", - "ServiceUserName", - "ServicePassword", "ServiceSpecificCredentialId", "UserName", "Status" @@ -7760,6 +7773,10 @@ "shape":"dateType", "documentation":"

        The date and time, in ISO 8601 date-time format, when the service-specific credential was created.

        " }, + "ExpirationDate":{ + "shape":"dateType", + "documentation":"

        The date and time when the service-specific credential expires. This field is only present for Bedrock API keys that were created with an expiration period.

        " + }, "ServiceName":{ "shape":"serviceName", "documentation":"

        The name of the service associated with the service-specific credential.

        " @@ -7772,6 +7789,14 @@ "shape":"servicePassword", "documentation":"

        The generated password for the service-specific credential.

        " }, + "ServiceCredentialAlias":{ + "shape":"serviceCredentialAlias", + "documentation":"

        For Bedrock API keys, this is the public portion of the credential that includes the IAM user name and a suffix containing version and creation information.

        " + }, + "ServiceCredentialSecret":{ + "shape":"serviceCredentialSecret", + "documentation":"

        For Bedrock API keys, this is the secret portion of the credential that should be used to authenticate API calls. This value is returned only when the credential is created.

        " + }, "ServiceSpecificCredentialId":{ "shape":"serviceSpecificCredentialId", "documentation":"

        The unique identifier for the service-specific credential.

        " @@ -7792,7 +7817,6 @@ "required":[ "UserName", "Status", - "ServiceUserName", "CreateDate", "ServiceSpecificCredentialId", "ServiceName" @@ -7810,10 +7834,18 @@ "shape":"serviceUserName", "documentation":"

        The generated user name for the service-specific credential.

        " }, + "ServiceCredentialAlias":{ + "shape":"serviceCredentialAlias", + "documentation":"

        For Bedrock API keys, this is the public portion of the credential that includes the IAM user name and a suffix containing version and creation information.

        " + }, "CreateDate":{ "shape":"dateType", "documentation":"

        The date and time, in ISO 8601 date-time format, when the service-specific credential was created.

        " }, + "ExpirationDate":{ + "shape":"dateType", + "documentation":"

        The date and time when the service-specific credential expires. This field is only present for Bedrock API keys that were created with an expiration period.

        " + }, "ServiceSpecificCredentialId":{ "shape":"serviceSpecificCredentialId", "documentation":"

        The unique identifier for the service-specific credential.

        " @@ -7890,7 +7922,7 @@ "documentation":"

        The date when the signing certificate was uploaded.

        " } }, - "documentation":"

        Contains information about an X.509 signing certificate.

        This data type is used as a response element in the UploadSigningCertificate and ListSigningCertificates operations.

        " + "documentation":"

        Contains information about an X.509 signing certificate.

        This data type is used as a response element in the UploadSigningCertificate and ListSigningCertificates operations.

        " }, "SimulateCustomPolicyRequest":{ "type":"structure", @@ -7961,7 +7993,7 @@ "documentation":"

        When IsTruncated is true, this element is present and contains the value to use for the Marker parameter in a subsequent pagination request.

        " } }, - "documentation":"

        Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy request.

        " + "documentation":"

        Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy request.

        " }, "SimulatePrincipalPolicyRequest":{ "type":"structure", @@ -8044,7 +8076,7 @@ "documentation":"

        The row and column of the end of a Statement in an IAM policy.

        " } }, - "documentation":"

        Contains a reference to a Statement element in a policy document that determines the result of the simulation.

        This data type is used by the MatchedStatements member of the EvaluationResult type.

        " + "documentation":"

        Contains a reference to a Statement element in a policy document that determines the result of the simulation.

        This data type is used by the MatchedStatements member of the EvaluationResult type.

        " }, "StatementListType":{ "type":"list", @@ -8063,7 +8095,7 @@ }, "Value":{ "shape":"tagValueType", - "documentation":"

        The value associated with this tag. For example, tags with a key name of Department could have values such as Human Resources, Accounting, and Support. Tags with a key name of Cost Center might have values that consist of the number associated with the different cost centers in your company. Typically, many resources have tags with the same key name but with different values.

        Amazon Web Services always interprets the tag Value as a single string. If you need to store an array, you can store comma-separated values in the string. However, you must interpret the value in your code.

        " + "documentation":"

        The value associated with this tag. For example, tags with a key name of Department could have values such as Human Resources, Accounting, and Support. Tags with a key name of Cost Center might have values that consist of the number associated with the different cost centers in your company. Typically, many resources have tags with the same key name but with different values.

        " } }, "documentation":"

        A structure that represents user-provided metadata that can be associated with an IAM resource. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " @@ -8221,7 +8253,7 @@ "documentation":"

        The Region from which the authenticated entity (user or role) last attempted to access the tracked action. Amazon Web Services does not report unauthenticated requests.

        This field is null if no IAM entities attempted to access the service within the tracking period.

        " } }, - "documentation":"

        Contains details about the most recent attempt to access an action within the service.

        This data type is used as a response element in the GetServiceLastAccessedDetails operation.

        " + "documentation":"

        Contains details about the most recent attempt to access an action within the service.

        This data type is used as a response element in the GetServiceLastAccessedDetails operation.

        " }, "TrackedActionsLastAccessed":{ "type":"list", @@ -8496,7 +8528,7 @@ }, "Password":{ "shape":"passwordType", - "documentation":"

        The new password for the specified IAM user.

        The regex pattern used to validate this parameter is a string of characters consisting of the following:

        • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

        • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

        • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

        However, the format can be further restricted by the account administrator by setting a password policy on the Amazon Web Services account. For more information, see UpdateAccountPasswordPolicy.

        " + "documentation":"

        The new password for the specified IAM user.

        The regex pattern used to validate this parameter is a string of characters consisting of the following:

        • Any printable ASCII character ranging from the space character (\\u0020) through the end of the ASCII character range

        • The printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF)

        • The special characters tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D)

        However, the format can be further restricted by the account administrator by setting a password policy on the Amazon Web Services account. For more information, see UpdateAccountPasswordPolicy.

        " }, "PasswordResetRequired":{ "shape":"booleanObjectType", @@ -8513,11 +8545,11 @@ "members":{ "OpenIDConnectProviderArn":{ "shape":"arnType", - "documentation":"

        The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

        For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        " }, "ThumbprintList":{ "shape":"thumbprintListType", - "documentation":"

        A list of certificate thumbprints that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

        " + "documentation":"

        A list of certificate thumbprints that are associated with the specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider.

        " } } }, @@ -8567,8 +8599,7 @@ }, "UpdateRoleResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateSAMLProviderRequest":{ "type":"structure", @@ -8604,7 +8635,7 @@ "documentation":"

        The Amazon Resource Name (ARN) of the SAML provider that was updated.

        " } }, - "documentation":"

        Contains the response to a successful UpdateSAMLProvider request.

        " + "documentation":"

        Contains the response to a successful UpdateSAMLProvider request.

        " }, "UpdateSSHPublicKeyRequest":{ "type":"structure", @@ -8731,7 +8762,7 @@ "documentation":"

        Contains information about the SSH public key.

        " } }, - "documentation":"

        Contains the response to a successful UploadSSHPublicKey request.

        " + "documentation":"

        Contains the response to a successful UploadSSHPublicKey request.

        " }, "UploadServerCertificateRequest":{ "type":"structure", @@ -8779,7 +8810,7 @@ "documentation":"

        A list of tags that are attached to the new IAM server certificate. The returned list of tags is sorted by tag key. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains the response to a successful UploadServerCertificate request.

        " + "documentation":"

        Contains the response to a successful UploadServerCertificate request.

        " }, "UploadSigningCertificateRequest":{ "type":"structure", @@ -8804,7 +8835,7 @@ "documentation":"

        Information about the certificate.

        " } }, - "documentation":"

        Contains the response to a successful UploadSigningCertificate request.

        " + "documentation":"

        Contains the response to a successful UploadSigningCertificate request.

        " }, "User":{ "type":"structure", @@ -8838,7 +8869,7 @@ }, "PasswordLastUsed":{ "shape":"dateType", - "documentation":"

        The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an Amazon Web Services website. For a list of Amazon Web Services websites that capture a user's last sign-in time, see the Credential reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

        • The user never had a password.

        • A password exists but has not been used since IAM started tracking this information on October 20, 2014.

        A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.

        This value is returned only in the GetUser and ListUsers operations.

        " + "documentation":"

        The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an Amazon Web Services website. For a list of Amazon Web Services websites that capture a user's last sign-in time, see the Credential reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

        • The user never had a password.

        • A password exists but has not been used since IAM started tracking this information on October 20, 2014.

        A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.

        This value is returned only in the GetUser and ListUsers operations.

        " }, "PermissionsBoundary":{ "shape":"AttachedPermissionsBoundary", @@ -8849,7 +8880,7 @@ "documentation":"

        A list of tags that are associated with the user. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about an IAM user entity.

        This data type is used as a response element in the following operations:

        " + "documentation":"

        Contains information about an IAM user entity.

        This data type is used as a response element in the following operations:

        " }, "UserDetail":{ "type":"structure", @@ -8892,7 +8923,7 @@ "documentation":"

        A list of tags that are associated with the user. For more information about tagging, see Tagging IAM resources in the IAM User Guide.

        " } }, - "documentation":"

        Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " + "documentation":"

        Contains information about an IAM user, including all the user's policies and all the IAM groups the user is in.

        This data type is used as a response element in the GetAccountAuthorizationDetails operation.

        " }, "VirtualMFADevice":{ "type":"structure", @@ -8934,7 +8965,7 @@ "accessKeyMetadataListType":{ "type":"list", "member":{"shape":"AccessKeyMetadata"}, - "documentation":"

        Contains a list of access key metadata.

        This data type is used as a response element in the ListAccessKeys operation.

        " + "documentation":"

        Contains a list of access key metadata.

        This data type is used as a response element in the ListAccessKeys operation.

        " }, "accessKeySecretType":{ "type":"string", @@ -8950,6 +8981,10 @@ "min":3, "pattern":"^[a-z0-9]([a-z0-9]|-(?!-)){1,61}[a-z0-9]$" }, + "allUsers":{ + "type":"boolean", + "box":true + }, "arnType":{ "type":"string", "documentation":"

        The Amazon Resource Name (ARN). ARNs are unique identifiers for Amazon Web Services resources.

        For more information about ARNs, go to Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.

        ", @@ -9008,7 +9043,7 @@ "certificateListType":{ "type":"list", "member":{"shape":"SigningCertificate"}, - "documentation":"

        Contains a list of signing certificates.

        This data type is used as a response element in the ListSigningCertificates operation.

        " + "documentation":"

        Contains a list of signing certificates.

        This data type is used as a response element in the ListSigningCertificates operation.

        " }, "clientIDListType":{ "type":"list", @@ -9019,6 +9054,11 @@ "max":255, "min":1 }, + "credentialAgeDays":{ + "type":"integer", + "max":36600, + "min":1 + }, "credentialReportExpiredExceptionMessage":{"type":"string"}, "credentialReportNotPresentExceptionMessage":{"type":"string"}, "credentialReportNotReadyExceptionMessage":{"type":"string"}, @@ -9075,7 +9115,7 @@ "groupListType":{ "type":"list", "member":{"shape":"Group"}, - "documentation":"

        Contains a list of IAM groups.

        This data type is used as a response element in the ListGroups operation.

        " + "documentation":"

        Contains a list of IAM groups.

        This data type is used as a response element in the ListGroups operation.

        " }, "groupNameListType":{ "type":"list", @@ -9151,7 +9191,7 @@ "mfaDeviceListType":{ "type":"list", "member":{"shape":"MFADevice"}, - "documentation":"

        Contains a list of MFA devices.

        This data type is used as a response element in the ListMFADevices and ListVirtualMFADevices operations.

        " + "documentation":"

        Contains a list of MFA devices.

        This data type is used as a response element in the ListMFADevices and ListVirtualMFADevices operations.

        " }, "minimumPasswordLengthType":{ "type":"integer", @@ -9226,7 +9266,7 @@ "policyNameListType":{ "type":"list", "member":{"shape":"policyNameType"}, - "documentation":"

        Contains a list of policy names.

        This data type is used as a response element in the ListPolicies operation.

        " + "documentation":"

        Contains a list of policy names.

        This data type is used as a response element in the ListPolicies operation.

        " }, "policyNameType":{ "type":"string", @@ -9318,7 +9358,7 @@ "roleListType":{ "type":"list", "member":{"shape":"Role"}, - "documentation":"

        Contains a list of IAM roles.

        This data type is used as a response element in the ListRoles operation.

        " + "documentation":"

        Contains a list of IAM roles.

        This data type is used as a response element in the ListRoles operation.

        " }, "roleMaxSessionDurationType":{ "type":"integer", @@ -9347,6 +9387,16 @@ "min":1, "pattern":"[\\w+=,.@-]+" }, + "serviceCredentialAlias":{ + "type":"string", + "max":200, + "min":0, + "pattern":"[\\w+=,.@-]+" + }, + "serviceCredentialSecret":{ + "type":"string", + "sensitive":true + }, "serviceFailureExceptionMessage":{"type":"string"}, "serviceName":{"type":"string"}, "serviceNameType":{"type":"string"}, @@ -9376,8 +9426,8 @@ "serviceUserName":{ "type":"string", "max":200, - "min":17, - "pattern":"[\\w+=,.@-]+" + "min":0, + "pattern":"[\\w+=,.@-]*" }, "sortKeyType":{ "type":"string", @@ -9392,7 +9442,8 @@ "type":"string", "enum":[ "Active", - "Inactive" + "Inactive", + "Expired" ] }, "stringType":{"type":"string"}, @@ -9476,7 +9527,7 @@ "userListType":{ "type":"list", "member":{"shape":"User"}, - "documentation":"

        Contains a list of users.

        This data type is used as a response element in the GetGroup and ListUsers operations.

        " + "documentation":"

        Contains a list of users.

        This data type is used as a response element in the GetGroup and ListUsers operations.

        " }, "userNameType":{ "type":"string", diff --git a/services/identitystore/pom.xml b/services/identitystore/pom.xml index fa8f6e604d2f..6429906929ae 100644 --- a/services/identitystore/pom.xml +++ b/services/identitystore/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT identitystore AWS Java SDK :: Services :: Identitystore diff --git a/services/identitystore/src/main/resources/codegen-resources/customization.config b/services/identitystore/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/identitystore/src/main/resources/codegen-resources/customization.config +++ b/services/identitystore/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/imagebuilder/pom.xml b/services/imagebuilder/pom.xml index 2e0b18356da5..70f786536215 100644 --- a/services/imagebuilder/pom.xml +++ b/services/imagebuilder/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT imagebuilder AWS Java SDK :: Services :: Imagebuilder diff --git a/services/imagebuilder/src/main/resources/codegen-resources/customization.config b/services/imagebuilder/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/imagebuilder/src/main/resources/codegen-resources/customization.config +++ b/services/imagebuilder/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/inspector/pom.xml b/services/inspector/pom.xml index 5439f146dac0..c8002ace115d 100644 --- a/services/inspector/pom.xml +++ b/services/inspector/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk 
services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT inspector AWS Java SDK :: Services :: Amazon Inspector Service diff --git a/services/inspector/src/main/resources/codegen-resources/customization.config b/services/inspector/src/main/resources/codegen-resources/customization.config index f5a533c1c38b..a709750374d8 100644 --- a/services/inspector/src/main/resources/codegen-resources/customization.config +++ b/services/inspector/src/main/resources/codegen-resources/customization.config @@ -55,6 +55,5 @@ ] } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/inspector2/pom.xml b/services/inspector2/pom.xml index f8d8f67b48b6..bf25555bf75f 100644 --- a/services/inspector2/pom.xml +++ b/services/inspector2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT inspector2 AWS Java SDK :: Services :: Inspector2 diff --git a/services/inspector2/src/main/resources/codegen-resources/customization.config b/services/inspector2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/inspector2/src/main/resources/codegen-resources/customization.config +++ b/services/inspector2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/inspector2/src/main/resources/codegen-resources/paginators-1.json b/services/inspector2/src/main/resources/codegen-resources/paginators-1.json index f3abea9ff99e..2bec35ed71d5 100644 --- a/services/inspector2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/inspector2/src/main/resources/codegen-resources/paginators-1.json @@ -6,6 +6,12 @@ "limit_key": "maxResults", "result_key": "scanResultDetails" }, + "GetClustersForImage": { + "input_token": "nextToken", + 
"output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cluster" + }, "ListAccountPermissions": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/inspector2/src/main/resources/codegen-resources/service-2.json b/services/inspector2/src/main/resources/codegen-resources/service-2.json index 2cdc9c942a57..af1ba1f84755 100644 --- a/services/inspector2/src/main/resources/codegen-resources/service-2.json +++ b/services/inspector2/src/main/resources/codegen-resources/service-2.json @@ -33,6 +33,44 @@ ], "documentation":"

        Associates an Amazon Web Services account with an Amazon Inspector delegated administrator. An HTTP 200 response indicates the association was successfully started, but doesn’t indicate whether it was completed. You can check if the association completed by using ListMembers for multiple accounts or GetMembers for a single account.

        " }, + "BatchAssociateCodeSecurityScanConfiguration":{ + "name":"BatchAssociateCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/batch/associate", + "responseCode":200 + }, + "input":{"shape":"BatchAssociateCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"BatchAssociateCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Associates multiple code repositories with an Amazon Inspector code security scan configuration.

        " + }, + "BatchDisassociateCodeSecurityScanConfiguration":{ + "name":"BatchDisassociateCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/batch/disassociate", + "responseCode":200 + }, + "input":{"shape":"BatchDisassociateCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"BatchDisassociateCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Disassociates multiple code repositories from an Amazon Inspector code security scan configuration.

        " + }, "BatchGetAccountStatus":{ "name":"BatchGetAccountStatus", "http":{ @@ -190,6 +228,44 @@ ], "documentation":"

        Creates a CIS scan configuration.

        " }, + "CreateCodeSecurityIntegration":{ + "name":"CreateCodeSecurityIntegration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/integration/create", + "responseCode":200 + }, + "input":{"shape":"CreateCodeSecurityIntegrationRequest"}, + "output":{"shape":"CreateCodeSecurityIntegrationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates a code security integration with a source code repository provider.

        After calling the CreateCodeSecurityIntegration operation, you complete authentication and authorization with your provider. Next, you call the UpdateCodeSecurityIntegration operation to provide the details to complete the integration setup.

        " + }, + "CreateCodeSecurityScanConfiguration":{ + "name":"CreateCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/create", + "responseCode":200 + }, + "input":{"shape":"CreateCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"CreateCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates a scan configuration for code security scanning.

        " + }, "CreateFilter":{ "name":"CreateFilter", "http":{ @@ -264,6 +340,42 @@ ], "documentation":"

        Deletes a CIS scan configuration.

        " }, + "DeleteCodeSecurityIntegration":{ + "name":"DeleteCodeSecurityIntegration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/integration/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteCodeSecurityIntegrationRequest"}, + "output":{"shape":"DeleteCodeSecurityIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Deletes a code security integration.

        " + }, + "DeleteCodeSecurityScanConfiguration":{ + "name":"DeleteCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"DeleteCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Deletes a code security scan configuration.

        " + }, "DeleteFilter":{ "name":"DeleteFilter", "http":{ @@ -425,6 +537,78 @@ ], "documentation":"

        Retrieves CIS scan result details.

        " }, + "GetClustersForImage":{ + "name":"GetClustersForImage", + "http":{ + "method":"POST", + "requestUri":"/cluster/get", + "responseCode":200 + }, + "input":{"shape":"GetClustersForImageRequest"}, + "output":{"shape":"GetClustersForImageResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of clusters and metadata associated with an image.

        " + }, + "GetCodeSecurityIntegration":{ + "name":"GetCodeSecurityIntegration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/integration/get", + "responseCode":200 + }, + "input":{"shape":"GetCodeSecurityIntegrationRequest"}, + "output":{"shape":"GetCodeSecurityIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves information about a code security integration.

        " + }, + "GetCodeSecurityScan":{ + "name":"GetCodeSecurityScan", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan/get", + "responseCode":200 + }, + "input":{"shape":"GetCodeSecurityScanRequest"}, + "output":{"shape":"GetCodeSecurityScanResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves information about a specific code security scan.

        " + }, + "GetCodeSecurityScanConfiguration":{ + "name":"GetCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/get", + "responseCode":200 + }, + "input":{"shape":"GetCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"GetCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves information about a code security scan configuration.

        " + }, "GetConfiguration":{ "name":"GetConfiguration", "http":{ @@ -634,6 +818,59 @@ ], "documentation":"

        Returns a CIS scan list.

        " }, + "ListCodeSecurityIntegrations":{ + "name":"ListCodeSecurityIntegrations", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/integration/list", + "responseCode":200 + }, + "input":{"shape":"ListCodeSecurityIntegrationsRequest"}, + "output":{"shape":"ListCodeSecurityIntegrationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Lists all code security integrations in your account.

        " + }, + "ListCodeSecurityScanConfigurationAssociations":{ + "name":"ListCodeSecurityScanConfigurationAssociations", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/associations/list", + "responseCode":200 + }, + "input":{"shape":"ListCodeSecurityScanConfigurationAssociationsRequest"}, + "output":{"shape":"ListCodeSecurityScanConfigurationAssociationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Lists the associations between code repositories and Amazon Inspector code security scan configurations.

        " + }, + "ListCodeSecurityScanConfigurations":{ + "name":"ListCodeSecurityScanConfigurations", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/list", + "responseCode":200 + }, + "input":{"shape":"ListCodeSecurityScanConfigurationsRequest"}, + "output":{"shape":"ListCodeSecurityScanConfigurationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Lists all code security scan configurations in your account.

        " + }, "ListCoverage":{ "name":"ListCoverage", "http":{ @@ -876,6 +1113,25 @@ "documentation":"

        Starts a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service.

        ", "idempotent":true }, + "StartCodeSecurityScan":{ + "name":"StartCodeSecurityScan", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan/start", + "responseCode":200 + }, + "input":{"shape":"StartCodeSecurityScanRequest"}, + "output":{"shape":"StartCodeSecurityScanResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Initiates a code security scan on a specified repository.

        " + }, "StopCisSession":{ "name":"StopCisSession", "http":{ @@ -949,6 +1205,44 @@ ], "documentation":"

        Updates a CIS scan configuration.

        " }, + "UpdateCodeSecurityIntegration":{ + "name":"UpdateCodeSecurityIntegration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/integration/update", + "responseCode":200 + }, + "input":{"shape":"UpdateCodeSecurityIntegrationRequest"}, + "output":{"shape":"UpdateCodeSecurityIntegrationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Updates an existing code security integration.

        After calling the CreateCodeSecurityIntegration operation, you complete authentication and authorization with your provider. Next, you call the UpdateCodeSecurityIntegration operation to provide the details to complete the integration setup.

        " + }, + "UpdateCodeSecurityScanConfiguration":{ + "name":"UpdateCodeSecurityScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/codesecurity/scan-configuration/update", + "responseCode":200 + }, + "input":{"shape":"UpdateCodeSecurityScanConfigurationRequest"}, + "output":{"shape":"UpdateCodeSecurityScanConfigurationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Updates an existing code security scan configuration.

        " + }, "UpdateConfiguration":{ "name":"UpdateConfiguration", "http":{ @@ -1062,7 +1356,7 @@ "members":{ "message":{"shape":"String"} }, - "documentation":"

        You do not have sufficient access to perform this action.

        ", + "documentation":"

        You do not have sufficient access to perform this action.

        For Enable, you receive this error if you attempt to use a feature in an unsupported Amazon Web Services Region.

        ", "error":{ "httpStatusCode":403, "senderFault":true @@ -1219,6 +1513,10 @@ "shape":"AwsEcrContainerAggregation", "documentation":"

        An object that contains details about an aggregation request based on Amazon ECR container images.

        " }, + "codeRepositoryAggregation":{ + "shape":"CodeRepositoryAggregation", + "documentation":"

        An object that contains details about an aggregation request based on code repositories.

        " + }, "ec2InstanceAggregation":{ "shape":"Ec2InstanceAggregation", "documentation":"

        An object that contains details about an aggregation request based on Amazon EC2 instances.

        " @@ -1278,6 +1576,10 @@ "shape":"AwsEcrContainerAggregationResponse", "documentation":"

        An object that contains details about an aggregation response based on Amazon ECR container images.

        " }, + "codeRepositoryAggregation":{ + "shape":"CodeRepositoryAggregationResponse", + "documentation":"

        An object that contains details about an aggregation response based on code repositories.

        " + }, "ec2InstanceAggregation":{ "shape":"Ec2InstanceAggregationResponse", "documentation":"

        An object that contains details about an aggregation response based on Amazon EC2 instances.

        " @@ -1331,7 +1633,8 @@ "IMAGE_LAYER", "ACCOUNT", "AWS_LAMBDA_FUNCTION", - "LAMBDA_LAYER" + "LAMBDA_LAYER", + "CODE_REPOSITORY" ] }, "AmiAggregation":{ @@ -1406,6 +1709,27 @@ "max":1011, "min":1 }, + "AssociateConfigurationRequest":{ + "type":"structure", + "required":[ + "resource", + "scanConfigurationArn" + ], + "members":{ + "resource":{"shape":"CodeSecurityResource"}, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration.

        " + } + }, + "documentation":"

        Contains details about a request to associate a code repository with a scan configuration.

        " + }, + "AssociateConfigurationRequestList":{ + "type":"list", + "member":{"shape":"AssociateConfigurationRequest"}, + "max":25, + "min":1 + }, "AssociateMemberRequest":{ "type":"structure", "required":["accountId"], @@ -1426,6 +1750,21 @@ } } }, + "AssociationResultStatusCode":{ + "type":"string", + "enum":[ + "INTERNAL_ERROR", + "ACCESS_DENIED", + "SCAN_CONFIGURATION_NOT_FOUND", + "INVALID_INPUT", + "RESOURCE_NOT_FOUND", + "QUOTA_EXCEEDED" + ] + }, + "AssociationResultStatusMessage":{ + "type":"string", + "min":1 + }, "AtigData":{ "type":"structure", "members":{ @@ -1448,6 +1787,10 @@ }, "documentation":"

        The Amazon Web Services Threat Intel Group (ATIG) details for a specific vulnerability.

        " }, + "AuthorizationUrl":{ + "type":"string", + "sensitive":true + }, "AutoEnable":{ "type":"structure", "required":[ @@ -1455,6 +1798,10 @@ "ecr" ], "members":{ + "codeRepository":{ + "shape":"Boolean", + "documentation":"

        Represents whether code repository scans are automatically enabled for new members of your Amazon Inspector organization.

        " + }, "ec2":{ "shape":"Boolean", "documentation":"

        Represents whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector organization.

        " @@ -1469,7 +1816,7 @@ }, "lambdaCode":{ "shape":"Boolean", - "documentation":"

        Represents whether Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.

         </p> 
        " + "documentation":"

        Represents whether Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.

        " } }, "documentation":"

        Represents which scan types are automatically enabled for new members of your Amazon Inspector organization.

        " @@ -1535,6 +1882,14 @@ "shape":"StringFilterList", "documentation":"

        The image tags.

        " }, + "inUseCount":{ + "shape":"NumberFilterList", + "documentation":"

        The number of Amazon ECS tasks or Amazon EKS pods where the Amazon ECR container image is in use.

        " + }, + "lastInUseAt":{ + "shape":"DateFilterList", + "documentation":"

        The last time an Amazon ECR image was used in an Amazon ECS task or Amazon EKS pod.

        " + }, "repositories":{ "shape":"StringFilterList", "documentation":"

        The container repositories.

        " @@ -1574,6 +1929,14 @@ "shape":"StringList", "documentation":"

        The container image tags.

        " }, + "inUseCount":{ + "shape":"Long", + "documentation":"

        The number of Amazon ECS tasks or Amazon EKS pods where the Amazon ECR container image is in use.

        " + }, + "lastInUseAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

        The last time an Amazon ECR image was used in an Amazon ECS task or Amazon EKS pod.

        " + }, "repository":{ "shape":"String", "documentation":"

        The container repository.

        " @@ -1613,6 +1976,14 @@ "shape":"ImageTagList", "documentation":"

        The image tags attached to the Amazon ECR container image.

        " }, + "inUseCount":{ + "shape":"Long", + "documentation":"

        The number of Amazon ECS tasks or Amazon EKS pods where the Amazon ECR container image is in use.

        " + }, + "lastInUseAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

        The last time an Amazon ECR image was used in an Amazon ECS task or Amazon EKS pod.

        " + }, "platform":{ "shape":"Platform", "documentation":"

        The platform of the Amazon ECR container image.

        " @@ -1640,27 +2011,108 @@ "ALL" ] }, - "AwsLambdaFunctionDetails":{ + "AwsEcsMetadataDetails":{ "type":"structure", "required":[ - "codeSha256", - "executionRoleArn", - "functionName", - "runtime", - "version" + "detailsGroup", + "taskDefinitionArn" ], "members":{ - "architectures":{ - "shape":"ArchitectureList", - "documentation":"

        The instruction set architecture that the Amazon Web Services Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

        " - }, - "codeSha256":{ - "shape":"NonEmptyString", - "documentation":"

        The SHA256 hash of the Amazon Web Services Lambda function's deployment package.

        " - }, - "executionRoleArn":{ - "shape":"ExecutionRoleArn", - "documentation":"

        The Amazon Web Services Lambda function's execution role.

        " + "detailsGroup":{ + "shape":"AwsEcsMetadataDetailsDetailsGroupString", + "documentation":"

        The details group information for a task in a cluster.

        " + }, + "taskDefinitionArn":{ + "shape":"AwsEcsMetadataDetailsTaskDefinitionArnString", + "documentation":"

        The task definition ARN.

        " + } + }, + "documentation":"

        Metadata about tasks where an image was in use.

        " + }, + "AwsEcsMetadataDetailsDetailsGroupString":{ + "type":"string", + "max":256, + "min":1 + }, + "AwsEcsMetadataDetailsTaskDefinitionArnString":{ + "type":"string", + "max":2048, + "min":1 + }, + "AwsEksMetadataDetails":{ + "type":"structure", + "members":{ + "namespace":{ + "shape":"AwsEksMetadataDetailsNamespaceString", + "documentation":"

        The namespace for an Amazon EKS cluster.

        " + }, + "workloadInfoList":{ + "shape":"AwsEksWorkloadInfoList", + "documentation":"

        The list of workloads.

        " + } + }, + "documentation":"

        The metadata for an Amazon EKS pod where an Amazon ECR image is in use.

        " + }, + "AwsEksMetadataDetailsNamespaceString":{ + "type":"string", + "max":256, + "min":1 + }, + "AwsEksWorkloadInfo":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "name":{ + "shape":"AwsEksWorkloadInfoNameString", + "documentation":"

        The name of the workload.

        " + }, + "type":{ + "shape":"AwsEksWorkloadInfoTypeString", + "documentation":"

        The workload type.

        " + } + }, + "documentation":"

        Information about the workload.

        " + }, + "AwsEksWorkloadInfoList":{ + "type":"list", + "member":{"shape":"AwsEksWorkloadInfo"}, + "max":100, + "min":0 + }, + "AwsEksWorkloadInfoNameString":{ + "type":"string", + "max":256, + "min":1 + }, + "AwsEksWorkloadInfoTypeString":{ + "type":"string", + "max":256, + "min":1 + }, + "AwsLambdaFunctionDetails":{ + "type":"structure", + "required":[ + "codeSha256", + "executionRoleArn", + "functionName", + "runtime", + "version" + ], + "members":{ + "architectures":{ + "shape":"ArchitectureList", + "documentation":"

        The instruction set architecture that the Amazon Web Services Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

        " + }, + "codeSha256":{ + "shape":"NonEmptyString", + "documentation":"

        The SHA256 hash of the Amazon Web Services Lambda function's deployment package.

        " + }, + "executionRoleArn":{ + "shape":"ExecutionRoleArn", + "documentation":"

        The Amazon Web Services Lambda function's execution role.

        " }, "functionName":{ "shape":"FunctionName", @@ -1706,6 +2158,52 @@ }, "exception":true }, + "BatchAssociateCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":["associateConfigurationRequests"], + "members":{ + "associateConfigurationRequests":{ + "shape":"AssociateConfigurationRequestList", + "documentation":"

        A list of code repositories to associate with the specified scan configuration.

        " + } + } + }, + "BatchAssociateCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "members":{ + "failedAssociations":{ + "shape":"FailedAssociationResultList", + "documentation":"

        Details of any code repositories that failed to be associated with the scan configuration.

        " + }, + "successfulAssociations":{ + "shape":"SuccessfulAssociationResultList", + "documentation":"

        Details of code repositories that were successfully associated with the scan configuration.

        " + } + } + }, + "BatchDisassociateCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":["disassociateConfigurationRequests"], + "members":{ + "disassociateConfigurationRequests":{ + "shape":"DisassociateConfigurationRequestList", + "documentation":"

        A list of code repositories to disassociate from the specified scan configuration.

        " + } + } + }, + "BatchDisassociateCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "members":{ + "failedAssociations":{ + "shape":"FailedAssociationResultList", + "documentation":"

        Details of any code repositories that failed to be disassociated from the scan configuration.

        " + }, + "successfulAssociations":{ + "shape":"SuccessfulAssociationResultList", + "documentation":"

        Details of code repositories that were successfully disassociated from the scan configuration.

        " + } + } + }, "BatchGetAccountStatusRequest":{ "type":"structure", "members":{ @@ -1819,7 +2317,7 @@ "members":{ "accountIds":{ "shape":"AccountIdSet", - "documentation":"

        The unique identifiers for the Amazon Web Services accounts to retrieve Amazon Inspector deep inspection activation status for.

         </p> 
        " + "documentation":"

        The unique identifiers for the Amazon Web Services accounts to retrieve Amazon Inspector deep inspection activation status for.

        " } } }, @@ -1828,11 +2326,11 @@ "members":{ "accountIds":{ "shape":"MemberAccountEc2DeepInspectionStatusStateList", - "documentation":"

        An array of objects that provide details on the activation status of Amazon Inspector deep inspection for each of the requested accounts.

         </p> 
        " + "documentation":"

        An array of objects that provide details on the activation status of Amazon Inspector deep inspection for each of the requested accounts.

        " }, "failedAccountIds":{ "shape":"FailedMemberAccountEc2DeepInspectionStatusStateList", - "documentation":"

        An array of objects that provide details on any accounts that failed to activate Amazon Inspector deep inspection and why.

         </p> 
        " + "documentation":"

        An array of objects that provide details on any accounts that failed to activate Amazon Inspector deep inspection and why.

        " } } }, @@ -2749,6 +3247,92 @@ "max":64, "min":1 }, + "ClusterDetails":{ + "type":"structure", + "required":[ + "clusterMetadata", + "lastInUse" + ], + "members":{ + "clusterMetadata":{"shape":"ClusterMetadata"}, + "lastInUse":{ + "shape":"Timestamp", + "documentation":"

        The last timestamp when Amazon Inspector recorded the image in use in the task or pod in the cluster.

        " + }, + "runningUnitCount":{ + "shape":"Long", + "documentation":"

        The number of tasks or pods where an image was running on the cluster.

        " + }, + "stoppedUnitCount":{ + "shape":"Long", + "documentation":"

        The number of tasks or pods where an image was stopped on the cluster in the last 24 hours.

        " + } + }, + "documentation":"

        Details about the task or pod in the cluster.

        " + }, + "ClusterForImageFilterCriteria":{ + "type":"structure", + "required":["resourceId"], + "members":{ + "resourceId":{ + "shape":"ClusterForImageFilterCriteriaResourceIdString", + "documentation":"

        The resource ID to be used in the filter criteria.

        " + } + }, + "documentation":"

        The filter criteria to be used.

        " + }, + "ClusterForImageFilterCriteriaResourceIdString":{ + "type":"string", + "pattern":"^arn:.*:ecr:.*:\\d{12}:repository\\/(?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*(\\/sha256:[a-z0-9]{64})?$" + }, + "ClusterInformation":{ + "type":"structure", + "required":["clusterArn"], + "members":{ + "clusterArn":{ + "shape":"ClusterInformationClusterArnString", + "documentation":"

        The cluster ARN.

        " + }, + "clusterDetails":{ + "shape":"ClusterInformationClusterDetailsList", + "documentation":"

        Details about the cluster.

        " + } + }, + "documentation":"

        Information about the cluster.

        " + }, + "ClusterInformationClusterArnString":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:aws(?:-[a-z0-9-]+)?:(?:ecs|eks):[a-z0-9-]+:[0-9]{12}:cluster/[a-zA-Z0-9_-]+$" + }, + "ClusterInformationClusterDetailsList":{ + "type":"list", + "member":{"shape":"ClusterDetails"}, + "max":100, + "min":1 + }, + "ClusterInformationList":{ + "type":"list", + "member":{"shape":"ClusterInformation"}, + "max":100, + "min":1 + }, + "ClusterMetadata":{ + "type":"structure", + "members":{ + "awsEcsMetadataDetails":{ + "shape":"AwsEcsMetadataDetails", + "documentation":"

        The details for an Amazon ECS cluster in the cluster metadata.

        " + }, + "awsEksMetadataDetails":{ + "shape":"AwsEksMetadataDetails", + "documentation":"

        The details for an Amazon EKS cluster in the cluster metadata.

        " + } + }, + "documentation":"

        The metadata for a cluster.

        ", + "union":true + }, "CodeFilePath":{ "type":"structure", "required":[ @@ -2806,6 +3390,339 @@ "max":20, "min":1 }, + "CodeRepositoryAggregation":{ + "type":"structure", + "members":{ + "projectNames":{ + "shape":"StringFilterList", + "documentation":"

        The project names to include in the aggregation results.

        " + }, + "providerTypes":{ + "shape":"StringFilterList", + "documentation":"

        The repository provider types to include in the aggregation results.

        " + }, + "resourceIds":{ + "shape":"StringFilterList", + "documentation":"

        The resource IDs to include in the aggregation results.

        " + }, + "sortBy":{ + "shape":"CodeRepositorySortBy", + "documentation":"

        The value to sort results by in the code repository aggregation.

        " + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

        The order to sort results by (ascending or descending) in the code repository aggregation.

        " + } + }, + "documentation":"

        The details that define an aggregation based on code repositories.

        " + }, + "CodeRepositoryAggregationResponse":{ + "type":"structure", + "required":["projectNames"], + "members":{ + "accountId":{ + "shape":"String", + "documentation":"

        The Amazon Web Services account ID associated with the code repository.

        " + }, + "exploitAvailableActiveFindingsCount":{ + "shape":"Long", + "documentation":"

        The number of active findings that have an exploit available for the code repository.

        " + }, + "fixAvailableActiveFindingsCount":{ + "shape":"Long", + "documentation":"

        The number of active findings that have a fix available for the code repository.

        " + }, + "projectNames":{ + "shape":"String", + "documentation":"

        The names of the projects associated with the code repository.

        " + }, + "providerType":{ + "shape":"String", + "documentation":"

        The type of repository provider for the code repository.

        " + }, + "resourceId":{ + "shape":"String", + "documentation":"

        The resource ID of the code repository.

        " + }, + "severityCounts":{"shape":"SeverityCounts"} + }, + "documentation":"

        A response that contains the results of a finding aggregation by code repository.

        " + }, + "CodeRepositoryDetails":{ + "type":"structure", + "members":{ + "integrationArn":{ + "shape":"CodeRepositoryIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration associated with the repository.

        " + }, + "projectName":{ + "shape":"CodeRepositoryProjectName", + "documentation":"

        The name of the project in the code repository.

        " + }, + "providerType":{ + "shape":"CodeRepositoryProviderType", + "documentation":"

        The type of repository provider (such as GitHub or GitLab).

        " + } + }, + "documentation":"

        Contains details about a code repository associated with a finding.

        " + }, + "CodeRepositoryIntegrationArn":{ + "type":"string", + "pattern":"^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:codesecurity-integration\\/[a-f0-9-]{36}$" + }, + "CodeRepositoryMetadata":{ + "type":"structure", + "required":[ + "projectName", + "providerType", + "providerTypeVisibility" + ], + "members":{ + "integrationArn":{ + "shape":"CodeRepositoryIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration associated with the repository.

        " + }, + "lastScannedCommitId":{ + "shape":"CommitId", + "documentation":"

        The ID of the last commit that was scanned in the repository.

        " + }, + "onDemandScan":{ + "shape":"CodeRepositoryOnDemandScan", + "documentation":"

        Information about on-demand scans performed on the repository.

        " + }, + "projectName":{ + "shape":"CodeRepositoryMetadataProjectNameString", + "documentation":"

        The name of the project in the code repository.

        " + }, + "providerType":{ + "shape":"CodeRepositoryMetadataProviderTypeString", + "documentation":"

        The type of repository provider (such as GitHub or GitLab).

        " + }, + "providerTypeVisibility":{ + "shape":"CodeRepositoryMetadataProviderTypeVisibilityString", + "documentation":"

        The visibility setting of the repository (public or private).

        " + }, + "scanConfiguration":{ + "shape":"ProjectCodeSecurityScanConfiguration", + "documentation":"

        The scan configuration settings applied to the code repository.

        " + } + }, + "documentation":"

        Contains metadata information about a code repository that is being scanned by Amazon Inspector.

        " + }, + "CodeRepositoryMetadataProjectNameString":{ + "type":"string", + "max":300, + "min":1 + }, + "CodeRepositoryMetadataProviderTypeString":{ + "type":"string", + "max":300, + "min":1 + }, + "CodeRepositoryMetadataProviderTypeVisibilityString":{ + "type":"string", + "max":300, + "min":1 + }, + "CodeRepositoryOnDemandScan":{ + "type":"structure", + "members":{ + "lastScanAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

        The timestamp when the last on-demand scan was performed.

        " + }, + "lastScannedCommitId":{ + "shape":"CommitId", + "documentation":"

        The ID of the last commit that was scanned during an on-demand scan.

        " + }, + "scanStatus":{"shape":"ScanStatus"} + }, + "documentation":"

        Contains information about on-demand scans performed on a code repository.

        " + }, + "CodeRepositoryProjectName":{ + "type":"string", + "max":512, + "min":1 + }, + "CodeRepositoryProviderType":{ + "type":"string", + "enum":[ + "GITHUB", + "GITLAB_SELF_MANAGED" + ] + }, + "CodeRepositorySortBy":{ + "type":"string", + "enum":[ + "CRITICAL", + "HIGH", + "ALL" + ] + }, + "CodeScanStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "SUCCESSFUL", + "FAILED", + "SKIPPED" + ] + }, + "CodeSecurityClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\S]+$" + }, + "CodeSecurityIntegrationArn":{ + "type":"string", + "documentation":"

        arn:aws:inspector2:::codesecurity-integration/

        ", + "pattern":"^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:codesecurity-integration/[a-f0-9-]{36}$" + }, + "CodeSecurityIntegrationSummary":{ + "type":"structure", + "required":[ + "createdOn", + "integrationArn", + "lastUpdateOn", + "name", + "status", + "statusReason", + "type" + ], + "members":{ + "createdOn":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the code security integration was created.

        " + }, + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration.

        " + }, + "lastUpdateOn":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the code security integration was last updated.

        " + }, + "name":{ + "shape":"IntegrationName", + "documentation":"

        The name of the code security integration.

        " + }, + "status":{ + "shape":"IntegrationStatus", + "documentation":"

        The current status of the code security integration.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the code security integration.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags associated with the code security integration.

        " + }, + "type":{ + "shape":"IntegrationType", + "documentation":"

        The type of repository provider for the integration.

        " + } + }, + "documentation":"

        A summary of information about a code security integration.

        " + }, + "CodeSecurityResource":{ + "type":"structure", + "members":{ + "projectId":{ + "shape":"ProjectId", + "documentation":"

        The unique identifier of the project in the code repository.

        " + } + }, + "documentation":"

        Identifies a specific resource in a code repository that will be scanned.

        ", + "union":true + }, + "CodeSecurityScanConfiguration":{ + "type":"structure", + "required":["ruleSetCategories"], + "members":{ + "continuousIntegrationScanConfiguration":{ + "shape":"ContinuousIntegrationScanConfiguration", + "documentation":"

        Configuration settings for continuous integration scans that run automatically when code changes are made.

        " + }, + "periodicScanConfiguration":{ + "shape":"PeriodicScanConfiguration", + "documentation":"

        Configuration settings for periodic scans that run on a scheduled basis.

        " + }, + "ruleSetCategories":{ + "shape":"RuleSetCategories", + "documentation":"

        The categories of security rules to be applied during the scan.

        " + } + }, + "documentation":"

        Contains the configuration settings for code security scans.

        " + }, + "CodeSecurityScanConfigurationAssociationSummaries":{ + "type":"list", + "member":{"shape":"CodeSecurityScanConfigurationAssociationSummary"} + }, + "CodeSecurityScanConfigurationAssociationSummary":{ + "type":"structure", + "members":{ + "resource":{"shape":"CodeSecurityResource"} + }, + "documentation":"

        A summary of an association between a code repository and a scan configuration.

        " + }, + "CodeSecurityScanConfigurationSummaries":{ + "type":"list", + "member":{"shape":"CodeSecurityScanConfigurationSummary"} + }, + "CodeSecurityScanConfigurationSummary":{ + "type":"structure", + "required":[ + "name", + "ownerAccountId", + "ruleSetCategories", + "scanConfigurationArn" + ], + "members":{ + "continuousIntegrationScanSupportedEvents":{ + "shape":"ContinuousIntegrationScanSupportedEvents", + "documentation":"

        The repository events that trigger continuous integration scans.

        " + }, + "frequencyExpression":{ + "shape":"FrequencyExpression", + "documentation":"

        The schedule expression for periodic scans, in cron format.

        " + }, + "name":{ + "shape":"ScanConfigurationName", + "documentation":"

        The name of the scan configuration.

        " + }, + "ownerAccountId":{ + "shape":"OwnerId", + "documentation":"

        The Amazon Web Services account ID that owns the scan configuration.

        " + }, + "periodicScanFrequency":{ + "shape":"PeriodicScanFrequency", + "documentation":"

        The frequency at which periodic scans are performed.

        " + }, + "ruleSetCategories":{ + "shape":"RuleSetCategories", + "documentation":"

        The categories of security rules applied during the scan.

        " + }, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration.

        " + }, + "scopeSettings":{ + "shape":"ScopeSettings", + "documentation":"

        The scope settings that define which repositories will be scanned. If the ScopeSetting parameter is ALL, the scan configuration applies to all existing and future projects imported into Amazon Inspector.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags associated with the scan configuration.

        " + } + }, + "documentation":"

        A summary of information about a code security scan configuration.

        " + }, + "CodeSecurityUuid":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, "CodeSnippetError":{ "type":"structure", "required":[ @@ -2916,6 +3833,12 @@ }, "documentation":"

        Contains information on the code vulnerability identified in your Lambda function.

        " }, + "CommitId":{ + "type":"string", + "max":40, + "min":0, + "pattern":"^([a-f0-9]{40})$" + }, "Component":{"type":"string"}, "ComponentArn":{"type":"string"}, "ComponentType":{"type":"string"}, @@ -2937,6 +3860,13 @@ }, "documentation":"

        A compute platform.

        " }, + "ConfigurationLevel":{ + "type":"string", + "enum":[ + "ORGANIZATION", + "ACCOUNT" + ] + }, "ConflictException":{ "type":"structure", "required":[ @@ -2955,13 +3885,37 @@ "documentation":"

        The type of the conflicting resource.

        " } }, - "documentation":"

        A conflict occurred.

        ", + "documentation":"

        A conflict occurred. This exception occurs when the same resource is being modified by concurrent requests.

        ", "error":{ "httpStatusCode":409, "senderFault":true }, "exception":true }, + "ContinuousIntegrationScanConfiguration":{ + "type":"structure", + "required":["supportedEvents"], + "members":{ + "supportedEvents":{ + "shape":"ContinuousIntegrationScanSupportedEvents", + "documentation":"

        The repository events that trigger continuous integration scans, such as pull requests or commits.

        " + } + }, + "documentation":"

        Configuration settings for continuous integration scans that run automatically when code changes are made.

        " + }, + "ContinuousIntegrationScanEvent":{ + "type":"string", + "enum":[ + "PULL_REQUEST", + "PUSH" + ] + }, + "ContinuousIntegrationScanSupportedEvents":{ + "type":"list", + "member":{"shape":"ContinuousIntegrationScanEvent"}, + "max":2, + "min":1 + }, "Counts":{ "type":"structure", "members":{ @@ -3009,10 +3963,30 @@ "shape":"CoverageStringFilterList", "documentation":"

        An array of Amazon Web Services account IDs to return coverage statistics for.

        " }, + "codeRepositoryProjectName":{ + "shape":"CoverageStringFilterList", + "documentation":"

        Filter criteria for code repositories based on project name.

        " + }, + "codeRepositoryProviderType":{ + "shape":"CoverageStringFilterList", + "documentation":"

        Filter criteria for code repositories based on provider type (such as GitHub, GitLab, etc.).

        " + }, + "codeRepositoryProviderTypeVisibility":{ + "shape":"CoverageStringFilterList", + "documentation":"

        Filter criteria for code repositories based on visibility setting (public or private).

        " + }, "ec2InstanceTags":{ "shape":"CoverageMapFilterList", "documentation":"

        The Amazon EC2 instance tags to filter on.

        " }, + "ecrImageInUseCount":{ + "shape":"CoverageNumberFilterList", + "documentation":"

        The number of Amazon ECR images in use.

        " + }, + "ecrImageLastInUseAt":{ + "shape":"CoverageDateFilterList", + "documentation":"

        Filter criteria for the date an Amazon ECR image was last in use in an Amazon ECS task or Amazon EKS pod.

        " + }, "ecrImageTags":{ "shape":"CoverageStringFilterList", "documentation":"

        The Amazon ECR image tags to filter on.

        " @@ -3041,6 +4015,10 @@ "shape":"CoverageDateFilterList", "documentation":"

        Filters Amazon Web Services resources based on whether Amazon Inspector has checked them for vulnerabilities within the specified time range.

        " }, + "lastScannedCommitId":{ + "shape":"CoverageStringFilterList", + "documentation":"

        Filter criteria for code repositories based on the ID of the last scanned commit.

        " + }, "resourceId":{ "shape":"CoverageStringFilterList", "documentation":"

        An array of Amazon Web Services resource IDs to return coverage statistics for.

        " @@ -3092,11 +4070,31 @@ "documentation":"

        The tag value associated with the coverage map filter.

        " } }, - "documentation":"

        Contains details of a coverage map filter.

        " + "documentation":"

        Contains details of a coverage map filter.

        " + }, + "CoverageMapFilterList":{ + "type":"list", + "member":{"shape":"CoverageMapFilter"}, + "max":10, + "min":1 + }, + "CoverageNumberFilter":{ + "type":"structure", + "members":{ + "lowerInclusive":{ + "shape":"Long", + "documentation":"

        The lower inclusive for the coverage number.

        " + }, + "upperInclusive":{ + "shape":"Long", + "documentation":"

        The upper inclusive for the coverage number.

        " + } + }, + "documentation":"

        The coverage number to be used in the filter.

        " }, - "CoverageMapFilterList":{ + "CoverageNumberFilterList":{ "type":"list", - "member":{"shape":"CoverageMapFilter"}, + "member":{"shape":"CoverageNumberFilter"}, "max":10, "min":1 }, @@ -3106,7 +4104,8 @@ "AWS_EC2_INSTANCE", "AWS_ECR_CONTAINER_IMAGE", "AWS_ECR_REPOSITORY", - "AWS_LAMBDA_FUNCTION" + "AWS_LAMBDA_FUNCTION", + "CODE_REPOSITORY" ] }, "CoverageStringComparison":{ @@ -3251,6 +4250,92 @@ }, "documentation":"

        Creates CIS targets.

        " }, + "CreateCodeSecurityIntegrationRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "details":{ + "shape":"CreateIntegrationDetail", + "documentation":"

        The integration details specific to the repository provider type.

        " + }, + "name":{ + "shape":"IntegrationName", + "documentation":"

        The name of the code security integration.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags to apply to the code security integration.

        " + }, + "type":{ + "shape":"IntegrationType", + "documentation":"

        The type of repository provider for the integration.

        " + } + } + }, + "CreateCodeSecurityIntegrationResponse":{ + "type":"structure", + "required":[ + "integrationArn", + "status" + ], + "members":{ + "authorizationUrl":{ + "shape":"AuthorizationUrl", + "documentation":"

        The URL used to authorize the integration with the repository provider.

        " + }, + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the created code security integration.

        " + }, + "status":{ + "shape":"IntegrationStatus", + "documentation":"

        The current status of the code security integration.

        " + } + } + }, + "CreateCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":[ + "configuration", + "level", + "name" + ], + "members":{ + "configuration":{ + "shape":"CodeSecurityScanConfiguration", + "documentation":"

        The configuration settings for the code security scan.

        " + }, + "level":{ + "shape":"ConfigurationLevel", + "documentation":"

        The level at which the scan configuration is applied (ORGANIZATION or ACCOUNT).

        " + }, + "name":{ + "shape":"ScanConfigurationName", + "documentation":"

        The name of the scan configuration.

        " + }, + "scopeSettings":{ + "shape":"ScopeSettings", + "documentation":"

        The scope settings that define which repositories will be scanned. Include this parameter to create a default scan configuration. Otherwise Amazon Inspector creates a general scan configuration.

        A default scan configuration automatically applies to all existing and future projects imported into Amazon Inspector. Use the BatchAssociateCodeSecurityScanConfiguration operation to associate a general scan configuration with projects.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags to apply to the scan configuration.

        " + } + } + }, + "CreateCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the created scan configuration.

        " + } + } + }, "CreateFilterRequest":{ "type":"structure", "required":[ @@ -3325,6 +4410,35 @@ } } }, + "CreateGitLabSelfManagedIntegrationDetail":{ + "type":"structure", + "required":[ + "accessToken", + "instanceUrl" + ], + "members":{ + "accessToken":{ + "shape":"GitLabAccessToken", + "documentation":"

        The personal access token used to authenticate with the self-managed GitLab instance.

        " + }, + "instanceUrl":{ + "shape":"InstanceUrl", + "documentation":"

        The URL of the self-managed GitLab instance.

        " + } + }, + "documentation":"

        Contains details required to create an integration with a self-managed GitLab instance.

        " + }, + "CreateIntegrationDetail":{ + "type":"structure", + "members":{ + "gitlabSelfManaged":{ + "shape":"CreateGitLabSelfManagedIntegrationDetail", + "documentation":"

        Details specific to creating an integration with a self-managed GitLab instance.

        " + } + }, + "documentation":"

        Contains details required to create a code security integration with a specific repository provider.

        ", + "union":true + }, "CreateSbomExportRequest":{ "type":"structure", "required":[ @@ -3613,6 +4727,44 @@ } } }, + "DeleteCodeSecurityIntegrationRequest":{ + "type":"structure", + "required":["integrationArn"], + "members":{ + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration to delete.

        " + } + } + }, + "DeleteCodeSecurityIntegrationResponse":{ + "type":"structure", + "members":{ + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the deleted code security integration.

        " + } + } + }, + "DeleteCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration to delete.

        " + } + } + }, + "DeleteCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "members":{ + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the deleted scan configuration.

        " + } + } + }, "DeleteFilterRequest":{ "type":"structure", "required":["arn"], @@ -3738,6 +4890,27 @@ } } }, + "DisassociateConfigurationRequest":{ + "type":"structure", + "required":[ + "resource", + "scanConfigurationArn" + ], + "members":{ + "resource":{"shape":"CodeSecurityResource"}, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration to disassociate from a code repository.

        " + } + }, + "documentation":"

        Contains details about a request to disassociate a code repository from a scan configuration.

        " + }, + "DisassociateConfigurationRequestList":{ + "type":"list", + "member":{"shape":"DisassociateConfigurationRequest"}, + "max":25, + "min":1 + }, "DisassociateMemberRequest":{ "type":"structure", "required":["accountId"], @@ -3929,6 +5102,10 @@ "shape":"EcrPullDateRescanDuration", "documentation":"

        The rescan duration configured for image pull date.

        " }, + "pullDateRescanMode":{ + "shape":"EcrPullDateRescanMode", + "documentation":"

        The rescan mode that determines whether the last pull date or the last in-use date is used for image rescans.

        " + }, "rescanDuration":{ "shape":"EcrRescanDuration", "documentation":"

        The rescan duration configured for image push date.

        " @@ -3953,6 +5130,14 @@ "shape":"DateTimeTimestamp", "documentation":"

        The date an image was last pulled at.

        " }, + "inUseCount":{ + "shape":"Long", + "documentation":"

        The number of Amazon ECS tasks or Amazon EKS pods where the Amazon ECR container image is in use.

        " + }, + "lastInUseAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

        The last time an Amazon ECR image was used in an Amazon ECS task or Amazon EKS pod.

        " + }, "tags":{ "shape":"TagList", "documentation":"

        Tags associated with the Amazon ECR image metadata.

        " @@ -3970,6 +5155,13 @@ "DAYS_180" ] }, + "EcrPullDateRescanMode":{ + "type":"string", + "enum":[ + "LAST_PULL_DATE", + "LAST_IN_USE_AT" + ] + }, "EcrRepositoryMetadata":{ "type":"structure", "members":{ @@ -4002,9 +5194,13 @@ "shape":"EcrPullDateRescanDuration", "documentation":"

        The rescan duration configured for image pull date.

        " }, + "pullDateRescanMode":{ + "shape":"EcrPullDateRescanMode", + "documentation":"

        The rescan mode that determines whether the last pull date or the last in-use date is used for image rescans.

        " + }, "rescanDuration":{ "shape":"EcrRescanDuration", - "documentation":"

        The rescan duration configured for image push date.

         </p> 
        " + "documentation":"

        The rescan duration configured for image push date.

        " }, "status":{ "shape":"EcrRescanDurationStatus", @@ -4260,6 +5456,29 @@ "max":100, "min":0 }, + "FailedAssociationResult":{ + "type":"structure", + "members":{ + "resource":{"shape":"CodeSecurityResource"}, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration that failed to be associated or disassociated.

        " + }, + "statusCode":{ + "shape":"AssociationResultStatusCode", + "documentation":"

        The status code indicating why the association or disassociation failed.

        " + }, + "statusMessage":{ + "shape":"AssociationResultStatusMessage", + "documentation":"

        A message explaining why the association or disassociation failed.

        " + } + }, + "documentation":"

        Details about a failed attempt to associate or disassociate a code repository with a scan configuration.

        " + }, + "FailedAssociationResultList":{ + "type":"list", + "member":{"shape":"FailedAssociationResult"} + }, "FailedMemberAccountEc2DeepInspectionStatusState":{ "type":"structure", "required":["accountId"], @@ -4368,6 +5587,14 @@ "shape":"StringFilterList", "documentation":"

        Details of the Amazon Web Services account IDs used to filter findings.

        " }, + "codeRepositoryProjectName":{ + "shape":"StringFilterList", + "documentation":"

        Filter criteria for findings based on the project name in a code repository.

        " + }, + "codeRepositoryProviderType":{ + "shape":"StringFilterList", + "documentation":"

        Filter criteria for findings based on the repository provider type (such as GitHub, GitLab, etc.).

        " + }, "codeVulnerabilityDetectorName":{ "shape":"StringFilterList", "documentation":"

        The name of the detector used to identify a code vulnerability in a Lambda function used to filter findings.

        " @@ -4408,6 +5635,14 @@ "shape":"StringFilterList", "documentation":"

        Details of the Amazon ECR image hashes used to filter findings.

        " }, + "ecrImageInUseCount":{ + "shape":"NumberFilterList", + "documentation":"

        Filter criteria for the number of Amazon ECS tasks or Amazon EKS pods where an Amazon ECR image is in use.

        " + }, + "ecrImageLastInUseAt":{ + "shape":"DateFilterList", + "documentation":"

        Filter criteria indicating when an Amazon ECR image was last used in an Amazon ECS cluster task or Amazon EKS cluster pod.

        " + }, "ecrImagePushedAt":{ "shape":"DateFilterList", "documentation":"

        Details on the Amazon ECR image push date and time used to filter findings.

        " @@ -4949,9 +6184,15 @@ "EC2", "ECR", "LAMBDA", - "LAMBDA_CODE" + "LAMBDA_CODE", + "CODE_REPOSITORY" ] }, + "FrequencyExpression":{ + "type":"string", + "max":256, + "min":1 + }, "FunctionName":{ "type":"string", "pattern":"^[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" @@ -5042,9 +6283,217 @@ "shape":"NextToken", "documentation":"

        The pagination token from a previous request that's used to retrieve the next page of results.

        " }, - "scanResultDetails":{ - "shape":"CisScanResultDetailsList", - "documentation":"

        The scan result details.

        " + "scanResultDetails":{ + "shape":"CisScanResultDetailsList", + "documentation":"

        The scan result details.

        " + } + } + }, + "GetClustersForImageNextToken":{ + "type":"string", + "max":3000, + "min":1 + }, + "GetClustersForImageRequest":{ + "type":"structure", + "required":["filter"], + "members":{ + "filter":{ + "shape":"ClusterForImageFilterCriteria", + "documentation":"

        The resource ID for the Amazon ECR image.

        " + }, + "maxResults":{ + "shape":"GetClustersForImageRequestMaxResultsInteger", + "documentation":"

        The maximum number of results to be returned in a single page of results.

        " + }, + "nextToken":{ + "shape":"GetClustersForImageNextToken", + "documentation":"

        The pagination token from a previous request used to retrieve the next page of results.

        " + } + } + }, + "GetClustersForImageRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "GetClustersForImageResponse":{ + "type":"structure", + "required":["cluster"], + "members":{ + "cluster":{ + "shape":"ClusterInformationList", + "documentation":"

        A unit of work inside of a cluster, which can include metadata about the cluster.

        " + }, + "nextToken":{ + "shape":"GetClustersForImageNextToken", + "documentation":"

        The pagination token from a previous request used to retrieve the next page of results.

        " + } + } + }, + "GetCodeSecurityIntegrationRequest":{ + "type":"structure", + "required":["integrationArn"], + "members":{ + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration to retrieve.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags associated with the code security integration.

        " + } + } + }, + "GetCodeSecurityIntegrationResponse":{ + "type":"structure", + "required":[ + "createdOn", + "integrationArn", + "lastUpdateOn", + "name", + "status", + "statusReason", + "type" + ], + "members":{ + "authorizationUrl":{ + "shape":"AuthorizationUrl", + "documentation":"

        The URL used to authorize the integration with the repository provider. This is only returned if reauthorization is required to fix a connection issue. Otherwise, it is null.

        " + }, + "createdOn":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the code security integration was created.

        " + }, + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration.

        " + }, + "lastUpdateOn":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the code security integration was last updated.

        " + }, + "name":{ + "shape":"IntegrationName", + "documentation":"

        The name of the code security integration.

        " + }, + "status":{ + "shape":"IntegrationStatus", + "documentation":"

        The current status of the code security integration.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the code security integration.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags associated with the code security integration.

        " + }, + "type":{ + "shape":"IntegrationType", + "documentation":"

        The type of repository provider for the integration.

        " + } + } + }, + "GetCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration to retrieve.

        " + } + } + }, + "GetCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "members":{ + "configuration":{ + "shape":"CodeSecurityScanConfiguration", + "documentation":"

        The configuration settings for the code security scan.

        " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the scan configuration was created.

        " + }, + "lastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the scan configuration was last updated.

        " + }, + "level":{ + "shape":"ConfigurationLevel", + "documentation":"

        The level at which the scan configuration is applied (ORGANIZATION or ACCOUNT).

        " + }, + "name":{ + "shape":"ScanConfigurationName", + "documentation":"

        The name of the scan configuration.

        " + }, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration.

        " + }, + "scopeSettings":{ + "shape":"ScopeSettings", + "documentation":"

        The scope settings that define which repositories will be scanned. If the ScopeSetting parameter is ALL, the scan configuration applies to all existing and future projects imported into Amazon Inspector.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags associated with the scan configuration.

        " + } + } + }, + "GetCodeSecurityScanRequest":{ + "type":"structure", + "required":[ + "resource", + "scanId" + ], + "members":{ + "resource":{ + "shape":"CodeSecurityResource", + "documentation":"

        The resource identifier for the code repository that was scanned.

        " + }, + "scanId":{ + "shape":"CodeSecurityUuid", + "documentation":"

        The unique identifier of the scan to retrieve.

        " + } + } + }, + "GetCodeSecurityScanResponse":{ + "type":"structure", + "members":{ + "accountId":{ + "shape":"String", + "documentation":"

        The Amazon Web Services account ID associated with the scan.

        " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the scan was created.

        " + }, + "lastCommitId":{ + "shape":"String", + "documentation":"

        The identifier of the last commit that was scanned. This is only returned if the scan was successful or skipped.

        " + }, + "resource":{ + "shape":"CodeSecurityResource", + "documentation":"

        The resource identifier for the code repository that was scanned.

        " + }, + "scanId":{ + "shape":"CodeSecurityUuid", + "documentation":"

        The unique identifier of the scan.

        " + }, + "status":{ + "shape":"CodeScanStatus", + "documentation":"

        The current status of the scan.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the scan.

        " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the scan was last updated.

        " } } }, @@ -5237,6 +6686,27 @@ } } }, + "GitHubAuthCode":{ + "type":"string", + "max":1024, + "min":1, + "sensitive":true + }, + "GitHubInstallationId":{ + "type":"string", + "max":1024, + "min":1 + }, + "GitLabAccessToken":{ + "type":"string", + "sensitive":true + }, + "GitLabAuthCode":{ + "type":"string", + "max":1024, + "min":1, + "sensitive":true + }, "GroupKey":{ "type":"string", "enum":[ @@ -5333,10 +6803,44 @@ }, "documentation":"

        Information about the Amazon Inspector score given to a finding.

        " }, + "InstanceUrl":{ + "type":"string", + "pattern":"^https://[-a-zA-Z0-9()@:%_+.~#?&//=]{1,1024}$", + "sensitive":true + }, "Integer":{ "type":"integer", "box":true }, + "IntegrationName":{ + "type":"string", + "max":60, + "min":1, + "pattern":"^[a-zA-Z0-9-_$:.]*$" + }, + "IntegrationStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "ACTIVE", + "INACTIVE", + "DISABLING" + ] + }, + "IntegrationSummaries":{ + "type":"list", + "member":{"shape":"CodeSecurityIntegrationSummary"}, + "max":100, + "min":0 + }, + "IntegrationType":{ + "type":"string", + "enum":[ + "GITLAB_SELF_MANAGED", + "GITHUB" + ] + }, "InternalServerException":{ "type":"structure", "required":["message"], @@ -5869,6 +7373,119 @@ "FAILED_CHECKS" ] }, + "ListCodeSecurityIntegrationsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListCodeSecurityIntegrationsRequestMaxResultsInteger", + "documentation":"

        The maximum number of results to return in a single call.

        ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListCodeSecurityIntegrationsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCodeSecurityIntegrationsResponse":{ + "type":"structure", + "members":{ + "integrations":{ + "shape":"IntegrationSummaries", + "documentation":"

        A list of code security integration summaries.

        " + }, + "nextToken":{ + "shape":"String", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        " + } + } + }, + "ListCodeSecurityScanConfigurationAssociationsRequest":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "maxResults":{ + "shape":"ListCodeSecurityScanConfigurationAssociationsRequestMaxResultsInteger", + "documentation":"

        The maximum number of results to return in the response. If your request would return more than the maximum, the response returns a nextToken value. Use this value when you call the action again to get the remaining results.

        ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        ", + "location":"querystring", + "locationName":"nextToken" + }, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration to list associations for.

        " + } + } + }, + "ListCodeSecurityScanConfigurationAssociationsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCodeSecurityScanConfigurationAssociationsResponse":{ + "type":"structure", + "members":{ + "associations":{ + "shape":"CodeSecurityScanConfigurationAssociationSummaries", + "documentation":"

        A list of associations between code repositories and scan configurations.

        " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        " + } + } + }, + "ListCodeSecurityScanConfigurationsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListCodeSecurityScanConfigurationsRequestMaxResultsInteger", + "documentation":"

        The maximum number of results to return in a single call.

        ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListCodeSecurityScanConfigurationsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCodeSecurityScanConfigurationsResponse":{ + "type":"structure", + "members":{ + "configurations":{ + "shape":"CodeSecurityScanConfigurationSummaries", + "documentation":"

        A list of code security scan configuration summaries.

        " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

        " + } + } + }, "ListCoverageMaxResults":{ "type":"integer", "box":true, @@ -6651,6 +8268,28 @@ "max":5, "min":0 }, + "PeriodicScanConfiguration":{ + "type":"structure", + "members":{ + "frequency":{ + "shape":"PeriodicScanFrequency", + "documentation":"

        The frequency at which periodic scans are performed (such as weekly or monthly).

        If you don't provide the frequencyExpression, Amazon Inspector chooses a day for the scan to run. If you provide the frequencyExpression, the schedule must match the specified frequency.

        " + }, + "frequencyExpression":{ + "shape":"FrequencyExpression", + "documentation":"

        The schedule expression for periodic scans, in cron format.

        " + } + }, + "documentation":"

        Configuration settings for periodic scans that run on a scheduled basis.

        " + }, + "PeriodicScanFrequency":{ + "type":"string", + "enum":[ + "WEEKLY", + "MONTHLY", + "NEVER" + ] + }, "Permission":{ "type":"structure", "required":[ @@ -6740,6 +8379,64 @@ "max":32, "min":0 }, + "ProjectCodeSecurityScanConfiguration":{ + "type":"structure", + "members":{ + "continuousIntegrationScanConfigurations":{ + "shape":"ProjectContinuousIntegrationScanConfigurationList", + "documentation":"

        The continuous integration scan configurations applied to the project.

        " + }, + "periodicScanConfigurations":{ + "shape":"ProjectPeriodicScanConfigurationList", + "documentation":"

        The periodic scan configurations applied to the project.

        " + } + }, + "documentation":"

        Contains the scan configuration settings applied to a specific project in a code repository.

        " + }, + "ProjectContinuousIntegrationScanConfiguration":{ + "type":"structure", + "members":{ + "ruleSetCategories":{ + "shape":"RuleSetCategories", + "documentation":"

        The categories of security rules applied during continuous integration scans for the project.

        " + }, + "supportedEvent":{ + "shape":"ContinuousIntegrationScanEvent", + "documentation":"

        The repository event that triggers continuous integration scans for the project.

        " + } + }, + "documentation":"

        Contains the continuous integration scan configuration settings applied to a specific project.

        " + }, + "ProjectContinuousIntegrationScanConfigurationList":{ + "type":"list", + "member":{"shape":"ProjectContinuousIntegrationScanConfiguration"} + }, + "ProjectId":{ + "type":"string", + "pattern":"^project-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "ProjectPeriodicScanConfiguration":{ + "type":"structure", + "members":{ + "frequencyExpression":{ + "shape":"FrequencyExpression", + "documentation":"

        The schedule expression for periodic scans, in cron format, applied to the project.

        " + }, + "ruleSetCategories":{ + "shape":"RuleSetCategories", + "documentation":"

        The categories of security rules applied during periodic scans for the project.

        " + } + }, + "documentation":"

        Contains the periodic scan configuration settings applied to a specific project.

        " + }, + "ProjectPeriodicScanConfigurationList":{ + "type":"list", + "member":{"shape":"ProjectPeriodicScanConfiguration"} + }, + "ProjectSelectionScope":{ + "type":"string", + "enum":["ALL"] + }, "Reason":{ "type":"string", "max":1024, @@ -6950,6 +8647,10 @@ "awsLambdaFunction":{ "shape":"AwsLambdaFunctionDetails", "documentation":"

        A summary of the information about an Amazon Web Services Lambda function affected by a finding.

        " + }, + "codeRepository":{ + "shape":"CodeRepositoryDetails", + "documentation":"

        Contains details about a code repository resource associated with a finding.

        " } }, "documentation":"

        Contains details about the resource involved in the finding.

        " @@ -6996,7 +8697,7 @@ "type":"string", "max":341, "min":10, - "pattern":"(^arn:.*:ecr:.*:\\d{12}:repository\\/(?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*(\\/sha256:[a-z0-9]{64})?$)|(^i-([a-z0-9]{8}|[a-z0-9]{17}|\\\\*)$|(^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$))" + "pattern":"(^arn:.*:ecr:.*:\\d{12}:repository\\/(?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*(\\/sha256:[a-z0-9]{64})?$)|(^i-([a-z0-9]{8}|[a-z0-9]{17}|\\\\*)$|(^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$)|(^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:codesecurity-integration\\/[a-f0-9-]{36}\\/project-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$))" }, "ResourceIdFilterList":{ "type":"list", @@ -7058,6 +8759,10 @@ "ResourceScanMetadata":{ "type":"structure", "members":{ + "codeRepository":{ + "shape":"CodeRepositoryMetadata", + "documentation":"

        Contains metadata about scan coverage for a code repository resource.

        " + }, "ec2":{ "shape":"Ec2Metadata", "documentation":"

        An object that contains metadata details for an Amazon EC2 instance.

        " @@ -7083,7 +8788,8 @@ "EC2", "ECR", "LAMBDA", - "LAMBDA_CODE" + "LAMBDA_CODE", + "CODE_REPOSITORY" ] }, "ResourceState":{ @@ -7093,6 +8799,7 @@ "ecr" ], "members":{ + "codeRepository":{"shape":"State"}, "ec2":{ "shape":"State", "documentation":"

        An object detailing the state of Amazon Inspector scanning for Amazon EC2 resources.

        " @@ -7119,6 +8826,10 @@ "ecr" ], "members":{ + "codeRepository":{ + "shape":"Status", + "documentation":"

        The status of Amazon Inspector scanning for code repositories.

        " + }, "ec2":{ "shape":"Status", "documentation":"

        The status of Amazon Inspector scanning for Amazon EC2 resources.

        " @@ -7186,7 +8897,8 @@ "AWS_EC2_INSTANCE", "AWS_ECR_CONTAINER_IMAGE", "AWS_ECR_REPOSITORY", - "AWS_LAMBDA_FUNCTION" + "AWS_LAMBDA_FUNCTION", + "CODE_REPOSITORY" ] }, "RiskScore":{ @@ -7198,6 +8910,20 @@ "max":500, "min":1 }, + "RuleSetCategories":{ + "type":"list", + "member":{"shape":"RuleSetCategory"}, + "max":3, + "min":1 + }, + "RuleSetCategory":{ + "type":"string", + "enum":[ + "SAST", + "IAC", + "SCA" + ] + }, "Runtime":{ "type":"string", "enum":[ @@ -7231,6 +8957,17 @@ "SPDX_2_3" ] }, + "ScanConfigurationArn":{ + "type":"string", + "documentation":"

        The scan configuration ARN, in the format arn:aws:inspector2:&lt;Region&gt;:&lt;AccountId&gt;:owner/&lt;OwnerId&gt;/codesecurity-configuration/&lt;UUID&gt;.

        ", + "pattern":"^arn:(aws[a-zA-Z-]*)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:owner/(\\d{12}|o-[a-z0-9]{10,32})/codesecurity-configuration/[a-f0-9-]{36}$" + }, + "ScanConfigurationName":{ + "type":"string", + "max":60, + "min":1, + "pattern":"^[a-zA-Z0-9-_$:.]*$" + }, "ScanMode":{ "type":"string", "enum":[ @@ -7247,7 +8984,7 @@ "members":{ "reason":{ "shape":"ScanStatusReason", - "documentation":"

        The scan status. Possible return values and descriptions are:

        PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon.

        ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy.

        INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user.

        UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance.

        UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

        SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image.

        RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up.

        SUCCESSFUL - The scan was successful.

        NO_RESOURCES_FOUND - Reserved for future use.

        IMAGE_SIZE_EXCEEDED - Reserved for future use.

        SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration.

        SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not new findings because of the scan frequency configuration.

        EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it’s in a stopped state.

        PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status.

        NO INVENTORY - Amazon Inspector couldn’t find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance’s inventory in the SSM Fleet Manager console.

        STALE_INVENTORY - Amazon Inspector wasn’t able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console.

        EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag.

        UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

        UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type.

        UNSUPPORTED_CONFIG_FILE - Reserved for future use.

        DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account.

        DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance.

        DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeding the maximum threshold of 15 minutes.

        DEEP_INSPECTION_NO_INVENTORY The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance. This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance.

        " + "documentation":"

        The scan status. Possible return values and descriptions are:

        ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy.

        ACCESS_DENIED_TO_ENCRYPTION_KEY - The KMS key policy doesn't allow Amazon Inspector access. Update the key policy.

        DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeded the maximum threshold of 15 minutes.

        DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance.

        DEEP_INSPECTION_NO_INVENTORY - The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance. This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance.

        DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account.

        EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it's in a stopped state.

        EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag.

        IMAGE_SIZE_EXCEEDED - Reserved for future use.

        INTEGRATION_CONNECTION_LOST - Amazon Inspector couldn't communicate with the source code management platform.

        INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user.

        NO INVENTORY - Amazon Inspector couldn't find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance's inventory in the SSM Fleet Manager console.

        NO_RESOURCES_FOUND - Reserved for future use.

        NO_SCAN_CONFIGURATION_ASSOCIATED - The code repository resource doesn't have an associated scan configuration.

        PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status.

        PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon.

        RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up.

        SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image.

        SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration.

        SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not generate new findings because of the scan frequency configuration.

        SCAN_IN_PROGRESS - The resource is currently being scanned.

        STALE_INVENTORY - Amazon Inspector wasn't able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console.

        SUCCESSFUL - The scan was successful.

        UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance.

        UNSUPPORTED_CONFIG_FILE - Reserved for future use.

        UNSUPPORTED_LANGUAGE - The scan was unsuccessful because the repository contains files in an unsupported programming language.

        UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type.

        UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

        UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

        " }, "statusCode":{ "shape":"ScanStatusCode", @@ -7291,7 +9028,13 @@ "DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED", "DEEP_INSPECTION_NO_INVENTORY", "AGENTLESS_INSTANCE_STORAGE_LIMIT_EXCEEDED", - "AGENTLESS_INSTANCE_COLLECTION_TIME_LIMIT_EXCEEDED" + "AGENTLESS_INSTANCE_COLLECTION_TIME_LIMIT_EXCEEDED", + "PENDING_REVIVAL_SCAN", + "INTEGRATION_CONNECTION_LOST", + "ACCESS_DENIED_TO_ENCRYPTION_KEY", + "UNSUPPORTED_LANGUAGE", + "NO_SCAN_CONFIGURATION_ASSOCIATED", + "SCAN_IN_PROGRESS" ] }, "ScanType":{ @@ -7325,6 +9068,16 @@ "documentation":"

        A schedule.

        ", "union":true }, + "ScopeSettings":{ + "type":"structure", + "members":{ + "projectSelectionScope":{ + "shape":"ProjectSelectionScope", + "documentation":"

        The scope of projects to be selected for scanning within the integrated repositories. Setting the value to ALL applies the scope settings to all existing and future projects imported into Amazon Inspector.

        " + } + }, + "documentation":"

        Defines the scope of repositories to be included in code security scans.

        " + }, "SearchVulnerabilitiesFilterCriteria":{ "type":"structure", "required":["vulnerabilityIds"], @@ -7570,6 +9323,34 @@ "members":{ } }, + "StartCodeSecurityScanRequest":{ + "type":"structure", + "required":["resource"], + "members":{ + "clientToken":{ + "shape":"CodeSecurityClientToken", + "documentation":"

        A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

        ", + "idempotencyToken":true + }, + "resource":{ + "shape":"CodeSecurityResource", + "documentation":"

        The resource identifier for the code repository to scan.

        " + } + } + }, + "StartCodeSecurityScanResponse":{ + "type":"structure", + "members":{ + "scanId":{ + "shape":"CodeSecurityUuid", + "documentation":"

        The unique identifier of the initiated scan.

        " + }, + "status":{ + "shape":"CodeScanStatus", + "documentation":"

        The current status of the initiated scan.

        " + } + } + }, "State":{ "type":"structure", "required":[ @@ -7810,6 +9591,21 @@ "max":16, "min":0 }, + "SuccessfulAssociationResult":{ + "type":"structure", + "members":{ + "resource":{"shape":"CodeSecurityResource"}, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration that was successfully associated or disassociated.

        " + } + }, + "documentation":"

        Details about a successful association or disassociation between a code repository and a scan configuration.

        " + }, + "SuccessfulAssociationResultList":{ + "type":"list", + "member":{"shape":"SuccessfulAssociationResult"} + }, "SuggestedFix":{ "type":"structure", "members":{ @@ -8181,6 +9977,66 @@ }, "documentation":"

        Updates CIS targets.

        " }, + "UpdateCodeSecurityIntegrationRequest":{ + "type":"structure", + "required":[ + "details", + "integrationArn" + ], + "members":{ + "details":{ + "shape":"UpdateIntegrationDetails", + "documentation":"

        The updated integration details specific to the repository provider type.

        " + }, + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the code security integration to update.

        " + } + } + }, + "UpdateCodeSecurityIntegrationResponse":{ + "type":"structure", + "required":[ + "integrationArn", + "status" + ], + "members":{ + "integrationArn":{ + "shape":"CodeSecurityIntegrationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the updated code security integration.

        " + }, + "status":{ + "shape":"IntegrationStatus", + "documentation":"

        The current status of the updated code security integration.

        " + } + } + }, + "UpdateCodeSecurityScanConfigurationRequest":{ + "type":"structure", + "required":[ + "configuration", + "scanConfigurationArn" + ], + "members":{ + "configuration":{ + "shape":"CodeSecurityScanConfiguration", + "documentation":"

        The updated configuration settings for the code security scan.

        " + }, + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the scan configuration to update.

        " + } + } + }, + "UpdateCodeSecurityScanConfigurationResponse":{ + "type":"structure", + "members":{ + "scanConfigurationArn":{ + "shape":"ScanConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the updated scan configuration.

        " + } + } + }, "UpdateConfigurationRequest":{ "type":"structure", "members":{ @@ -8300,6 +10156,50 @@ } } }, + "UpdateGitHubIntegrationDetail":{ + "type":"structure", + "required":[ + "code", + "installationId" + ], + "members":{ + "code":{ + "shape":"GitHubAuthCode", + "documentation":"

        The authorization code received from GitHub to update the integration.

        " + }, + "installationId":{ + "shape":"GitHubInstallationId", + "documentation":"

        The installation ID of the GitHub App associated with the integration.

        " + } + }, + "documentation":"

        Contains details required to update an integration with GitHub.

        " + }, + "UpdateGitLabSelfManagedIntegrationDetail":{ + "type":"structure", + "required":["authCode"], + "members":{ + "authCode":{ + "shape":"GitLabAuthCode", + "documentation":"

        The authorization code received from the self-managed GitLab instance to update the integration.

        " + } + }, + "documentation":"

        Contains details required to update an integration with a self-managed GitLab instance.

        " + }, + "UpdateIntegrationDetails":{ + "type":"structure", + "members":{ + "github":{ + "shape":"UpdateGitHubIntegrationDetail", + "documentation":"

        Details specific to updating an integration with GitHub.

        " + }, + "gitlabSelfManaged":{ + "shape":"UpdateGitLabSelfManagedIntegrationDetail", + "documentation":"

        Details specific to updating an integration with a self-managed GitLab instance.

        " + } + }, + "documentation":"

        Contains details required to update a code security integration with a specific repository provider.

        ", + "union":true + }, "UpdateOrgEc2DeepInspectionConfigurationRequest":{ "type":"structure", "required":["orgPackagePaths"], @@ -8396,7 +10296,10 @@ "ECR_INITIAL_SCAN", "ECR_RESCAN", "LAMBDA_FUNCTION_HOURS", - "LAMBDA_FUNCTION_CODE_HOURS" + "LAMBDA_FUNCTION_CODE_HOURS", + "CODE_REPOSITORY_SAST", + "CODE_REPOSITORY_IAC", + "CODE_REPOSITORY_SCA" ] }, "UsageValue":{ diff --git a/services/inspectorscan/pom.xml b/services/inspectorscan/pom.xml index 7e8c5313c3ab..1621ff5fd98c 100644 --- a/services/inspectorscan/pom.xml +++ b/services/inspectorscan/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT inspectorscan AWS Java SDK :: Services :: Inspector Scan diff --git a/services/inspectorscan/src/main/resources/codegen-resources/customization.config b/services/inspectorscan/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/inspectorscan/src/main/resources/codegen-resources/customization.config +++ b/services/inspectorscan/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/internetmonitor/pom.xml b/services/internetmonitor/pom.xml index 98c5396d3208..3480e0a54edd 100644 --- a/services/internetmonitor/pom.xml +++ b/services/internetmonitor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT internetmonitor AWS Java SDK :: Services :: Internet Monitor diff --git a/services/internetmonitor/src/main/resources/codegen-resources/customization.config b/services/internetmonitor/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/internetmonitor/src/main/resources/codegen-resources/customization.config +++ b/services/internetmonitor/src/main/resources/codegen-resources/customization.config @@ 
-1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/invoicing/pom.xml b/services/invoicing/pom.xml index f9092be2b75f..66e2a5a0d22b 100644 --- a/services/invoicing/pom.xml +++ b/services/invoicing/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT invoicing AWS Java SDK :: Services :: Invoicing diff --git a/services/invoicing/src/main/resources/codegen-resources/customization.config b/services/invoicing/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/invoicing/src/main/resources/codegen-resources/customization.config +++ b/services/invoicing/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/invoicing/src/main/resources/codegen-resources/paginators-1.json b/services/invoicing/src/main/resources/codegen-resources/paginators-1.json index 860206357477..98952e1a4868 100644 --- a/services/invoicing/src/main/resources/codegen-resources/paginators-1.json +++ b/services/invoicing/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "ListInvoiceSummaries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InvoiceSummaries" + }, "ListInvoiceUnits": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/invoicing/src/main/resources/codegen-resources/service-2.json b/services/invoicing/src/main/resources/codegen-resources/service-2.json index df50ba463cab..6de121f6015d 100644 --- a/services/invoicing/src/main/resources/codegen-resources/service-2.json +++ b/services/invoicing/src/main/resources/codegen-resources/service-2.json @@ -24,11 +24,11 @@ "input":{"shape":"BatchGetInvoiceProfileRequest"}, "output":{"shape":"BatchGetInvoiceProfileResponse"}, 
"errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This gets the invoice profile associated with a set of accounts. The accounts must be linked accounts under the requester management account organization.

        " }, @@ -41,10 +41,10 @@ "input":{"shape":"CreateInvoiceUnitRequest"}, "output":{"shape":"CreateInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This creates a new invoice unit with the provided definition.

        " }, @@ -57,11 +57,11 @@ "input":{"shape":"DeleteInvoiceUnitRequest"}, "output":{"shape":"DeleteInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This deletes an invoice unit with the provided invoice unit ARN.

        " }, @@ -74,14 +74,31 @@ "input":{"shape":"GetInvoiceUnitRequest"}, "output":{"shape":"GetInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This retrieves the invoice unit definition.

        " }, + "ListInvoiceSummaries":{ + "name":"ListInvoiceSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInvoiceSummariesRequest"}, + "output":{"shape":"ListInvoiceSummariesResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Retrieves your invoice details programmatically, without line item details.

        " + }, "ListInvoiceUnits":{ "name":"ListInvoiceUnits", "http":{ @@ -91,10 +108,10 @@ "input":{"shape":"ListInvoiceUnitsRequest"}, "output":{"shape":"ListInvoiceUnitsResponse"}, "errors":[ - {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        This fetches a list of all invoice unit definitions for a given account, as of the provided AsOf date.

        " }, @@ -107,11 +124,11 @@ "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Lists the tags for a resource.

        " }, @@ -124,12 +141,12 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Adds a tag to a resource.

        " }, @@ -142,11 +159,11 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        Removes a tag from a resource.

        " }, @@ -159,11 +176,11 @@ "input":{"shape":"UpdateInvoiceUnitRequest"}, "output":{"shape":"UpdateInvoiceUnitResponse"}, "errors":[ - {"shape":"ThrottlingException"}, - {"shape":"InternalServerException"}, - {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} ], "documentation":"

        You can update the invoice unit configuration at any time, and Amazon Web Services will use the latest configuration at the end of the month.

        " } @@ -191,8 +208,36 @@ "type":"string", "pattern":"\\d{12}" }, + "AmountBreakdown":{ + "type":"structure", + "members":{ + "SubTotalAmount":{ + "shape":"BasicString", + "documentation":"

        The total of a set of the breakdown.

        " + }, + "Discounts":{ + "shape":"DiscountsBreakdown", + "documentation":"

        The discounted amount.

        " + }, + "Taxes":{ + "shape":"TaxesBreakdown", + "documentation":"

        The tax amount.

        " + }, + "Fees":{ + "shape":"FeesBreakdown", + "documentation":"

        The fee amount.

        " + } + }, + "documentation":"

        Details about how the total amount was calculated and categorized.

        " + }, "AsOfTimestamp":{"type":"timestamp"}, "BasicString":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"[\\s\\S]*" + }, + "BasicStringWithoutSpace":{ "type":"string", "max":1024, "min":0, @@ -217,6 +262,24 @@ } } }, + "BillingPeriod":{ + "type":"structure", + "required":[ + "Month", + "Year" + ], + "members":{ + "Month":{ + "shape":"Month", + "documentation":"

        The billing period month.

        " + }, + "Year":{ + "shape":"Year", + "documentation":"

        The billing period year.

        " + } + }, + "documentation":"

        The billing period for which you want to retrieve invoice-related documents.

        " + }, "CreateInvoiceUnitRequest":{ "type":"structure", "required":[ @@ -260,6 +323,47 @@ } } }, + "CurrencyCode":{ + "type":"string", + "max":3, + "min":3 + }, + "CurrencyExchangeDetails":{ + "type":"structure", + "members":{ + "SourceCurrencyCode":{ + "shape":"BasicString", + "documentation":"

        The exchange source currency.

        " + }, + "TargetCurrencyCode":{ + "shape":"BasicString", + "documentation":"

        The exchange target currency.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The currency exchange rate.

        " + } + }, + "documentation":"

        The details of currency exchange.

        " + }, + "DateInterval":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "documentation":"

        The beginning of the time period that you want invoice-related documents for. The start date is inclusive. For example, if start is 2019-01-01, AWS retrieves invoices starting at 2019-01-01 up to the end date.

        " + }, + "EndDate":{ + "shape":"Timestamp", + "documentation":"

        The end of the time period that you want invoice-related documents for. The end date is exclusive. For example, if end is 2019-01-10, Amazon Web Services retrieves invoice-related documents from the start date up to, but not including, 2019-01-10.

        " + } + }, + "documentation":"

        The time period that you want invoice-related documents for.

        " + }, "DeleteInvoiceUnitRequest":{ "type":"structure", "required":["InvoiceUnitArn"], @@ -285,6 +389,88 @@ "min":0, "pattern":"[\\S\\s]*" }, + "DiscountsBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"DiscountsBreakdownAmountList", + "documentation":"

        The list of discounts information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The discount's total amount.

        " + } + }, + "documentation":"

        The discounts details.

        " + }, + "DiscountsBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the discount.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The discounted amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The details for the discount rate.

        " + } + }, + "documentation":"

        The discounted amount.

        " + }, + "DiscountsBreakdownAmountList":{ + "type":"list", + "member":{"shape":"DiscountsBreakdownAmount"} + }, + "Entity":{ + "type":"structure", + "members":{ + "InvoicingEntity":{ + "shape":"BasicString", + "documentation":"

        The name of the entity that issues the Amazon Web Services invoice.

        " + } + }, + "documentation":"

        The organization name providing Amazon Web Services services.

        " + }, + "FeesBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"FeesBreakdownAmountList", + "documentation":"

        The list of fees information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The total amount of fees.

        " + } + }, + "documentation":"

        The details of fees.

        " + }, + "FeesBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the fee.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The fee amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        Details about the rate amount.

        " + } + }, + "documentation":"

        The fee amount.

        " + }, + "FeesBreakdownAmountList":{ + "type":"list", + "member":{"shape":"FeesBreakdownAmount"} + }, "Filters":{ "type":"structure", "members":{ @@ -365,6 +551,32 @@ "exception":true, "fault":true }, + "InvoiceCurrencyAmount":{ + "type":"structure", + "members":{ + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The invoice currency amount.

        " + }, + "TotalAmountBeforeTax":{ + "shape":"BasicString", + "documentation":"

        Details about the invoice total amount before tax.

        " + }, + "CurrencyCode":{ + "shape":"CurrencyCode", + "documentation":"

        The currency denomination of the invoice document.

        " + }, + "AmountBreakdown":{ + "shape":"AmountBreakdown", + "documentation":"

        Details about the invoice currency amount.

        " + }, + "CurrencyExchangeDetails":{ + "shape":"CurrencyExchangeDetails", + "documentation":"

        The details of currency exchange.

        " + } + }, + "documentation":"

        The amount charged after taxes, in the preferred currency.

        " + }, "InvoiceProfile":{ "type":"structure", "members":{ @@ -373,7 +585,7 @@ "documentation":"

        The account ID the invoice profile is generated for.

        " }, "ReceiverName":{ - "shape":"BasicString", + "shape":"BasicStringWithoutSpace", "documentation":"

        The name of the person receiving the invoice profile.

        " }, "ReceiverAddress":{ @@ -381,20 +593,127 @@ "documentation":"

        The address of the receiver that will be printed on the invoice.

        " }, "ReceiverEmail":{ - "shape":"SensitiveBasicString", + "shape":"SensitiveBasicStringWithoutSpace", "documentation":"

        The email address for the invoice profile receiver.

        " }, "Issuer":{ - "shape":"BasicString", + "shape":"BasicStringWithoutSpace", "documentation":"

        This specifies the issuing entity of the invoice.

        " }, "TaxRegistrationNumber":{ - "shape":"SensitiveBasicString", + "shape":"SensitiveBasicStringWithoutSpace", "documentation":"

        Your Tax Registration Number (TRN) information.

        " } }, "documentation":"

        Contains high-level information about the invoice receiver.

        " }, + "InvoiceSummaries":{ + "type":"list", + "member":{"shape":"InvoiceSummary"} + }, + "InvoiceSummariesFilter":{ + "type":"structure", + "members":{ + "TimeInterval":{ + "shape":"DateInterval", + "documentation":"

        The date range for invoice summary retrieval.

        " + }, + "BillingPeriod":{ + "shape":"BillingPeriod", + "documentation":"

        The billing period associated with the invoice documents.

        " + }, + "InvoicingEntity":{ + "shape":"BasicString", + "documentation":"

        The name of the entity that issues the Amazon Web Services invoice.

        " + } + }, + "documentation":"

        Filters for your invoice summaries.

        " + }, + "InvoiceSummariesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "InvoiceSummariesSelector":{ + "type":"structure", + "required":[ + "ResourceType", + "Value" + ], + "members":{ + "ResourceType":{ + "shape":"ListInvoiceSummariesResourceType", + "documentation":"

        The query identifier type (INVOICE_ID or ACCOUNT_ID).

        " + }, + "Value":{ + "shape":"StringWithoutNewLine", + "documentation":"

        The value of the query identifier.

        " + } + }, + "documentation":"

        Specifies the invoice summary.

        " + }, + "InvoiceSummary":{ + "type":"structure", + "members":{ + "AccountId":{ + "shape":"AccountIdString", + "documentation":"

        The Amazon Web Services account ID.

        " + }, + "InvoiceId":{ + "shape":"BasicString", + "documentation":"

        The invoice ID.

        " + }, + "IssuedDate":{ + "shape":"Timestamp", + "documentation":"

        The issued date of the invoice.

        " + }, + "DueDate":{ + "shape":"Timestamp", + "documentation":"

        The invoice due date.

        " + }, + "Entity":{ + "shape":"Entity", + "documentation":"

        The organization name providing Amazon Web Services services.

        " + }, + "BillingPeriod":{ + "shape":"BillingPeriod", + "documentation":"

        The billing period of the invoice-related document.

        " + }, + "InvoiceType":{ + "shape":"InvoiceType", + "documentation":"

        The type of invoice.

        " + }, + "OriginalInvoiceId":{ + "shape":"BasicString", + "documentation":"

        The initial or original invoice ID.

        " + }, + "PurchaseOrderNumber":{ + "shape":"BasicString", + "documentation":"

        The purchase order number associated with the invoice.

        " + }, + "BaseCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the product and service currency.

        " + }, + "TaxCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the tax currency.

        " + }, + "PaymentCurrencyAmount":{ + "shape":"InvoiceCurrencyAmount", + "documentation":"

        The summary with the customer configured currency.

        " + } + }, + "documentation":"

        The invoice that the API retrieved.

        " + }, + "InvoiceType":{ + "type":"string", + "enum":[ + "INVOICE", + "CREDIT_MEMO" + ] + }, "InvoiceUnit":{ "type":"structure", "members":{ @@ -461,6 +780,49 @@ "member":{"shape":"InvoiceUnit"} }, "LastModifiedTimestamp":{"type":"timestamp"}, + "ListInvoiceSummariesRequest":{ + "type":"structure", + "required":["Selector"], + "members":{ + "Selector":{ + "shape":"InvoiceSummariesSelector", + "documentation":"

        The option to retrieve details for a specific invoice by providing its unique ID. Alternatively, access information for all invoices linked to the account by providing an account ID.

        " + }, + "Filter":{ + "shape":"InvoiceSummariesFilter", + "documentation":"

        Filters you can use to customize your invoice summary.

        " + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

        The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

        " + }, + "MaxResults":{ + "shape":"InvoiceSummariesMaxResults", + "documentation":"

        The maximum number of invoice summaries a paginated response can contain.

        " + } + } + }, + "ListInvoiceSummariesResourceType":{ + "type":"string", + "enum":[ + "ACCOUNT_ID", + "INVOICE_ID" + ] + }, + "ListInvoiceSummariesResponse":{ + "type":"structure", + "required":["InvoiceSummaries"], + "members":{ + "InvoiceSummaries":{ + "shape":"InvoiceSummaries", + "documentation":"

        List of key (summary level) invoice details without line item details.

        " + }, + "NextToken":{ + "shape":"NextTokenString", + "documentation":"

        The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

        " + } + } + }, "ListInvoiceUnitsRequest":{ "type":"structure", "members":{ @@ -520,9 +882,14 @@ "max":500, "min":1 }, + "Month":{ + "type":"integer", + "box":true, + "max":12, + "min":1 + }, "NextTokenString":{ "type":"string", - "max":2048, "min":1, "pattern":"[\\S\\s]*" }, @@ -625,7 +992,7 @@ "max":256, "min":0 }, - "SensitiveBasicString":{ + "SensitiveBasicStringWithoutSpace":{ "type":"string", "max":1024, "min":0, @@ -641,6 +1008,12 @@ "documentation":"

        The request was rejected because it attempted to create resources beyond the current Amazon Web Services account limits. The error message describes the limit exceeded.

        ", "exception":true }, + "StringWithoutNewLine":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, "TagResourceRequest":{ "type":"structure", "required":[ @@ -667,9 +1040,45 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws[-a-z0-9]*:[a-z0-9]+:[-a-z0-9]*:[0-9]{12}:[-a-zA-Z0-9/:_]+" + "pattern":"arn:aws[-a-z0-9]*:(invoicing)::[0-9]{12}:[-a-zA-Z0-9/:_]+" }, "TaxInheritanceDisabledFlag":{"type":"boolean"}, + "TaxesBreakdown":{ + "type":"structure", + "members":{ + "Breakdown":{ + "shape":"TaxesBreakdownAmountList", + "documentation":"

        A list of tax information.

        " + }, + "TotalAmount":{ + "shape":"BasicString", + "documentation":"

        The total amount for your taxes.

        " + } + }, + "documentation":"

        The details of the taxes.

        " + }, + "TaxesBreakdownAmount":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"BasicString", + "documentation":"

        The description of the tax.

        " + }, + "Amount":{ + "shape":"BasicString", + "documentation":"

        The tax amount.

        " + }, + "Rate":{ + "shape":"BasicString", + "documentation":"

        The details of the tax rate.

        " + } + }, + "documentation":"

        The tax amount.

        " + }, + "TaxesBreakdownAmountList":{ + "type":"list", + "member":{"shape":"TaxesBreakdownAmount"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -678,6 +1087,7 @@ "documentation":"

        The request was denied due to request throttling.

        ", "exception":true }, + "Timestamp":{"type":"timestamp"}, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -792,6 +1202,12 @@ "unknownOperation", "other" ] + }, + "Year":{ + "type":"integer", + "box":true, + "max":2050, + "min":2005 } }, "documentation":"

        Amazon Web Services Invoice Configuration

        You can use Amazon Web Services Invoice Configuration APIs to programmatically create, update, delete, get, and list invoice units. You can also programmatically fetch the information of the invoice receiver. For example, business legal name, address, and invoicing contacts.

        You can use Amazon Web Services Invoice Configuration to receive separate Amazon Web Services invoices based your organizational needs. By using Amazon Web Services Invoice Configuration, you can configure invoice units that are groups of Amazon Web Services accounts that represent your business entities, and receive separate invoices for each business entity. You can also assign a unique member or payer account as the invoice receiver for each invoice unit. As you create new accounts within your Organizations using Amazon Web Services Invoice Configuration APIs, you can automate the creation of new invoice units and subsequently automate the addition of new accounts to your invoice units.

        Service endpoint

        You can use the following endpoints for Amazon Web Services Invoice Configuration:

        • https://invoicing.us-east-1.api.aws

        " diff --git a/services/iot/pom.xml b/services/iot/pom.xml index cad540932e97..b4f71e125ce8 100644 --- a/services/iot/pom.xml +++ b/services/iot/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iot AWS Java SDK :: Services :: AWS IoT diff --git a/services/iot/src/main/resources/codegen-resources/customization.config b/services/iot/src/main/resources/codegen-resources/customization.config index 4579f956d68a..1ebfa285e404 100644 --- a/services/iot/src/main/resources/codegen-resources/customization.config +++ b/services/iot/src/main/resources/codegen-resources/customization.config @@ -55,6 +55,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotanalytics/pom.xml b/services/iotanalytics/pom.xml index bf9da5967a69..4d4744d02ef8 100644 --- a/services/iotanalytics/pom.xml +++ b/services/iotanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotanalytics AWS Java SDK :: Services :: IoTAnalytics diff --git a/services/iotanalytics/src/main/resources/codegen-resources/customization.config b/services/iotanalytics/src/main/resources/codegen-resources/customization.config index 9c3265f1174c..eae2bdee484c 100644 --- a/services/iotanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/iotanalytics/src/main/resources/codegen-resources/customization.config @@ -13,6 +13,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotdataplane/pom.xml b/services/iotdataplane/pom.xml index 9059eb1d0be8..502157972b05 100644 --- a/services/iotdataplane/pom.xml +++ b/services/iotdataplane/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotdataplane AWS Java SDK :: Services 
:: AWS IoT Data Plane diff --git a/services/iotdataplane/src/main/resources/codegen-resources/customization.config b/services/iotdataplane/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotdataplane/src/main/resources/codegen-resources/customization.config +++ b/services/iotdataplane/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotdeviceadvisor/pom.xml b/services/iotdeviceadvisor/pom.xml index 500bb2aa573e..f15de8b58068 100644 --- a/services/iotdeviceadvisor/pom.xml +++ b/services/iotdeviceadvisor/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotdeviceadvisor AWS Java SDK :: Services :: Iot Device Advisor diff --git a/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config b/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config +++ b/services/iotdeviceadvisor/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotevents/pom.xml b/services/iotevents/pom.xml index b749aa036433..c599a5b5d367 100644 --- a/services/iotevents/pom.xml +++ b/services/iotevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotevents AWS Java SDK :: Services :: IoT Events diff --git a/services/iotevents/src/main/resources/codegen-resources/customization.config b/services/iotevents/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/iotevents/src/main/resources/codegen-resources/customization.config +++ b/services/iotevents/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ioteventsdata/pom.xml b/services/ioteventsdata/pom.xml index 6bdaa247158b..ddaee4dba155 100644 --- a/services/ioteventsdata/pom.xml +++ b/services/ioteventsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ioteventsdata AWS Java SDK :: Services :: IoT Events Data diff --git a/services/ioteventsdata/src/main/resources/codegen-resources/customization.config b/services/ioteventsdata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ioteventsdata/src/main/resources/codegen-resources/customization.config +++ b/services/ioteventsdata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotfleethub/pom.xml b/services/iotfleethub/pom.xml index ee4aa7cd61eb..4b4d5959399b 100644 --- a/services/iotfleethub/pom.xml +++ b/services/iotfleethub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotfleethub AWS Java SDK :: Services :: Io T Fleet Hub diff --git a/services/iotfleethub/src/main/resources/codegen-resources/customization.config b/services/iotfleethub/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotfleethub/src/main/resources/codegen-resources/customization.config +++ b/services/iotfleethub/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/iotfleetwise/pom.xml b/services/iotfleetwise/pom.xml index 5ed239d5664a..4c9d25059828 100644 --- a/services/iotfleetwise/pom.xml +++ b/services/iotfleetwise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotfleetwise AWS Java SDK :: Services :: Io T Fleet Wise diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/customization.config b/services/iotfleetwise/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/customization.config +++ b/services/iotfleetwise/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json index 94809d3642d1..19bef5b92d1d 100644 --- a/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json +++ b/services/iotfleetwise/src/main/resources/codegen-resources/service-2.json @@ -5297,7 +5297,7 @@ }, "status":{ "shape":"CampaignStatus", - "documentation":"

        The state of a campaign. The status can be one of:

        • CREATING - Amazon Web Services IoT FleetWise is processing your request to create the campaign.

        • WAITING_FOR_APPROVAL - After a campaign is created, it enters the WAITING_FOR_APPROVAL state. To allow Amazon Web Services IoT FleetWise to deploy the campaign to the target vehicle or fleet, use the API operation to approve the campaign.

        • RUNNING - The campaign is active.

        • SUSPENDED - The campaign is suspended. To resume the campaign, use the API operation.

        " + "documentation":"

        The state of a campaign. The status can be one of:

        • CREATING - Amazon Web Services IoT FleetWise is processing your request to create the campaign.

        • WAITING_FOR_APPROVAL - After you create a campaign, it enters this state. Use the API operation to approve the campaign for deployment to the target vehicle or fleet.

        • RUNNING - The campaign is active.

        • SUSPENDED - The campaign is suspended. To resume the campaign, use the API operation.

        " } } }, @@ -5742,7 +5742,8 @@ "READY", "HEALTHY", "SUSPENDED", - "DELETING" + "DELETING", + "READY_FOR_CHECKIN" ] }, "VehicleStatus":{ @@ -5758,7 +5759,7 @@ }, "status":{ "shape":"VehicleState", - "documentation":"

        The status of a campaign, which can be one of the following:

        • CREATED - The campaign has been created successfully but has not been approved.

        • READY - The campaign has been approved but has not been deployed to the vehicle.

        • HEALTHY - The campaign has been deployed to the vehicle.

        • SUSPENDED - The campaign has been suspended and data collection is paused.

        • DELETING - The campaign is being removed from the vehicle.

        " + "documentation":"

        The status of a campaign, which can be one of the following:

        • CREATED - The campaign exists but is not yet approved.

        • READY - The campaign is approved but has not been deployed to the vehicle. Data has not arrived at the vehicle yet.

        • HEALTHY - The campaign is deployed to the vehicle.

        • SUSPENDED - The campaign is suspended and data collection is paused.

        • DELETING - The campaign is being removed from the vehicle.

        • READY_FOR_CHECKIN - The campaign is approved and waiting for vehicle check-in before deployment.

        " } }, "documentation":"

        Information about a campaign associated with a vehicle.

        " diff --git a/services/iotjobsdataplane/pom.xml b/services/iotjobsdataplane/pom.xml index 24cf66b1f69f..2ad0f30fe934 100644 --- a/services/iotjobsdataplane/pom.xml +++ b/services/iotjobsdataplane/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotjobsdataplane AWS Java SDK :: Services :: IoT Jobs Data Plane diff --git a/services/iotjobsdataplane/src/main/resources/codegen-resources/customization.config b/services/iotjobsdataplane/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotjobsdataplane/src/main/resources/codegen-resources/customization.config +++ b/services/iotjobsdataplane/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotmanagedintegrations/pom.xml b/services/iotmanagedintegrations/pom.xml index 637603baf21a..595ddbe9f45e 100644 --- a/services/iotmanagedintegrations/pom.xml +++ b/services/iotmanagedintegrations/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotmanagedintegrations AWS Java SDK :: Services :: IoT Managed Integrations diff --git a/services/iotmanagedintegrations/src/main/resources/codegen-resources/paginators-1.json b/services/iotmanagedintegrations/src/main/resources/codegen-resources/paginators-1.json index ecd0686d2698..6f81bb9a9388 100644 --- a/services/iotmanagedintegrations/src/main/resources/codegen-resources/paginators-1.json +++ b/services/iotmanagedintegrations/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,23 @@ { "pagination": { + "ListAccountAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, + "ListCloudConnectors": { + "input_token": "NextToken", + "output_token": "NextToken", + 
"limit_key": "MaxResults", + "result_key": "Items" + }, + "ListConnectorDestinations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ConnectorDestinationList" + }, "ListCredentialLockers": { "input_token": "NextToken", "output_token": "NextToken", @@ -12,12 +30,30 @@ "limit_key": "MaxResults", "result_key": "DestinationList" }, + "ListDeviceDiscoveries": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, + "ListDiscoveredDevices": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, "ListEventLogConfigurations": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "EventLogConfigurationList" }, + "ListManagedThingAccountAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Items" + }, "ListManagedThingSchemas": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/iotmanagedintegrations/src/main/resources/codegen-resources/service-2.json b/services/iotmanagedintegrations/src/main/resources/codegen-resources/service-2.json index 8706a64b5f5c..4305e77884df 100644 --- a/services/iotmanagedintegrations/src/main/resources/codegen-resources/service-2.json +++ b/services/iotmanagedintegrations/src/main/resources/codegen-resources/service-2.json @@ -13,6 +13,62 @@ "uid":"iot-managed-integrations-2025-03-03" }, "operations":{ + "CreateAccountAssociation":{ + "name":"CreateAccountAssociation", + "http":{ + "method":"POST", + "requestUri":"/account-associations", + "responseCode":201 + }, + "input":{"shape":"CreateAccountAssociationRequest"}, + "output":{"shape":"CreateAccountAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + 
{"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Creates a new account association via the destination id.

        " + }, + "CreateCloudConnector":{ + "name":"CreateCloudConnector", + "http":{ + "method":"POST", + "requestUri":"/cloud-connectors", + "responseCode":201 + }, + "input":{"shape":"CreateCloudConnectorRequest"}, + "output":{"shape":"CreateCloudConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Creates a C2C (cloud-to-cloud) connector.

        " + }, + "CreateConnectorDestination":{ + "name":"CreateConnectorDestination", + "http":{ + "method":"POST", + "requestUri":"/connector-destinations", + "responseCode":201 + }, + "input":{"shape":"CreateConnectorDestinationRequest"}, + "output":{"shape":"CreateConnectorDestinationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Create a connector destination for connecting a cloud-to-cloud (C2C) connector to the customer's Amazon Web Services account.

        " + }, "CreateCredentialLocker":{ "name":"CreateCredentialLocker", "http":{ @@ -122,8 +178,10 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, {"shape":"ConflictException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Create an over-the-air (OTA) task to update a device.

        " }, @@ -161,10 +219,66 @@ {"shape":"UnauthorizedException"}, {"shape":"ConflictException"}, {"shape":"ServiceUnavailableException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Create a provisioning profile for a device to execute the provisioning flows using a provisioning template. The provisioning template is a document that defines the set of resources and policies applied to a device during the provisioning process.

        " }, + "DeleteAccountAssociation":{ + "name":"DeleteAccountAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/account-associations/{AccountAssociationId}", + "responseCode":200 + }, + "input":{"shape":"DeleteAccountAssociationRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Remove a third-party account and related devices from an end user.

        ", + "idempotent":true + }, + "DeleteCloudConnector":{ + "name":"DeleteCloudConnector", + "http":{ + "method":"DELETE", + "requestUri":"/cloud-connectors/{Identifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteCloudConnectorRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Delete a cloud connector.

        ", + "idempotent":true + }, + "DeleteConnectorDestination":{ + "name":"DeleteConnectorDestination", + "http":{ + "method":"DELETE", + "requestUri":"/connector-destinations/{Identifier}", + "responseCode":200 + }, + "input":{"shape":"DeleteConnectorDestinationRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Delete a connector destination for connecting a cloud-to-cloud (C2C) connector to the customer's Amazon Web Services account.

        ", + "idempotent":true + }, "DeleteCredentialLocker":{ "name":"DeleteCredentialLocker", "http":{ @@ -272,7 +386,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"} ], "documentation":"

        Delete the over-the-air (OTA) task.

        ", "idempotent":true @@ -315,6 +430,79 @@ "documentation":"

        Delete a provisioning profile.

        ", "idempotent":true }, + "DeregisterAccountAssociation":{ + "name":"DeregisterAccountAssociation", + "http":{ + "method":"PUT", + "requestUri":"/managed-thing-associations/deregister", + "responseCode":200 + }, + "input":{"shape":"DeregisterAccountAssociationRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deregisters an account association, removing the connection between a managed thing and a third-party account.

        ", + "idempotent":true + }, + "GetAccountAssociation":{ + "name":"GetAccountAssociation", + "http":{ + "method":"GET", + "requestUri":"/account-associations/{AccountAssociationId}", + "responseCode":200 + }, + "input":{"shape":"GetAccountAssociationRequest"}, + "output":{"shape":"GetAccountAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Get an account association for an Amazon Web Services account linked to a customer-managed destination.

        " + }, + "GetCloudConnector":{ + "name":"GetCloudConnector", + "http":{ + "method":"GET", + "requestUri":"/cloud-connectors/{Identifier}", + "responseCode":200 + }, + "input":{"shape":"GetCloudConnectorRequest"}, + "output":{"shape":"GetCloudConnectorResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Gets all the information about a connector for a connector developer.

        " + }, + "GetConnectorDestination":{ + "name":"GetConnectorDestination", + "http":{ + "method":"GET", + "requestUri":"/connector-destinations/{Identifier}", + "responseCode":200 + }, + "input":{"shape":"GetConnectorDestinationRequest"}, + "output":{"shape":"GetConnectorDestinationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Get a connector destination of a cloud-to-cloud (C2C) connector connecting to a customer's Amazon Web Services account.

        " + }, "GetCredentialLocker":{ "name":"GetCredentialLocker", "http":{ @@ -527,7 +715,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Get the metadata information for a managed thing.

        " + "documentation":"

        Get the metadata information for a managed thing.

        The managedThing metadata parameter is used for associating attributes with a managedThing that can be used for grouping over-the-air (OTA) tasks. Name value pairs in metadata can be used in the OtaTargetQueryString parameter for the CreateOtaTask API operation.

        " }, "GetManagedThingState":{ "name":"GetManagedThingState", @@ -661,6 +849,58 @@ ], "documentation":"

        Gets a schema version with the provided information.

        " }, + "ListAccountAssociations":{ + "name":"ListAccountAssociations", + "http":{ + "method":"GET", + "requestUri":"/account-associations", + "responseCode":200 + }, + "input":{"shape":"ListAccountAssociationsRequest"}, + "output":{"shape":"ListAccountAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Lists all account associations, with optional filtering by connector destination ID.

        " + }, + "ListCloudConnectors":{ + "name":"ListCloudConnectors", + "http":{ + "method":"GET", + "requestUri":"/cloud-connectors", + "responseCode":200 + }, + "input":{"shape":"ListCloudConnectorsRequest"}, + "output":{"shape":"ListCloudConnectorsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns a list of connectors based on permissions.

        " + }, + "ListConnectorDestinations":{ + "name":"ListConnectorDestinations", + "http":{ + "method":"GET", + "requestUri":"/connector-destinations", + "responseCode":200 + }, + "input":{"shape":"ListConnectorDestinationsRequest"}, + "output":{"shape":"ListConnectorDestinationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Lists all connector destinations, with optional filtering by cloud connector ID.

        " + }, "ListCredentialLockers":{ "name":"ListCredentialLockers", "http":{ @@ -696,6 +936,45 @@ ], "documentation":"

        List all destination names under one Amazon Web Services account.

        " }, + "ListDeviceDiscoveries":{ + "name":"ListDeviceDiscoveries", + "http":{ + "method":"GET", + "requestUri":"/device-discoveries", + "responseCode":200 + }, + "input":{"shape":"ListDeviceDiscoveriesRequest"}, + "output":{"shape":"ListDeviceDiscoveriesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Lists all device discovery tasks, with optional filtering by type and status.

        " + }, + "ListDiscoveredDevices":{ + "name":"ListDiscoveredDevices", + "http":{ + "method":"GET", + "requestUri":"/device-discoveries/{Identifier}/devices", + "responseCode":200 + }, + "input":{"shape":"ListDiscoveredDevicesRequest"}, + "output":{"shape":"ListDiscoveredDevicesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Lists all devices discovered during a specific device discovery task.

        " + }, "ListEventLogConfigurations":{ "name":"ListEventLogConfigurations", "http":{ @@ -713,6 +992,23 @@ ], "documentation":"

        List all event log configurations for an account.

        " }, + "ListManagedThingAccountAssociations":{ + "name":"ListManagedThingAccountAssociations", + "http":{ + "method":"GET", + "requestUri":"/managed-thing-associations", + "responseCode":200 + }, + "input":{"shape":"ListManagedThingAccountAssociationsRequest"}, + "output":{"shape":"ListManagedThingAccountAssociationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Lists all account associations for a specific managed thing.

        " + }, "ListManagedThingSchemas":{ "name":"ListManagedThingSchemas", "http":{ @@ -750,7 +1046,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        List all of the associations and statuses for a managed thing by its owner.

        " + "documentation":"

        Lists all managed things, with provision for filters.

        " }, "ListNotificationConfigurations":{ "name":"ListNotificationConfigurations", @@ -799,7 +1095,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        List all of the over-the-air (OTA) task executions.

        " }, @@ -816,7 +1113,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        List all of the over-the-air (OTA) tasks.

        " }, @@ -858,6 +1156,23 @@ ], "documentation":"

        Lists schema versions with the provided information.

        " }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        List tags for the specified resource.

        " + }, "PutDefaultEncryptionConfiguration":{ "name":"PutDefaultEncryptionConfiguration", "http":{ @@ -916,6 +1231,26 @@ "documentation":"

        Set the runtime log configuration for a specific managed thing or for all managed things as a group.

        ", "idempotent":true }, + "RegisterAccountAssociation":{ + "name":"RegisterAccountAssociation", + "http":{ + "method":"PUT", + "requestUri":"/managed-thing-associations/register", + "responseCode":201 + }, + "input":{"shape":"RegisterAccountAssociationRequest"}, + "output":{"shape":"RegisterAccountAssociationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Registers an account association with a managed thing, establishing a connection between a device and a third-party account.

        ", + "idempotent":true + }, "RegisterCustomEndpoint":{ "name":"RegisterCustomEndpoint", "http":{ @@ -954,6 +1289,26 @@ "documentation":"

        Reset a runtime log configuration for a specific managed thing or for all managed things as a group.

        ", "idempotent":true }, + "SendConnectorEvent":{ + "name":"SendConnectorEvent", + "http":{ + "method":"POST", + "requestUri":"/connector-event/{ConnectorId}", + "responseCode":202 + }, + "input":{"shape":"SendConnectorEventRequest"}, + "output":{"shape":"SendConnectorEventResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Relays third-party device events for a connector such as a new device or a device state change event.

        ", + "idempotent":true + }, "SendManagedThingCommand":{ "name":"SendManagedThingCommand", "http":{ @@ -967,13 +1322,33 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, + {"shape":"UnauthorizedException"}, {"shape":"ServiceUnavailableException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], "documentation":"

        Send the command to the device represented by the managed thing.

        " }, - "StartDeviceDiscovery":{ + "StartAccountAssociationRefresh":{ + "name":"StartAccountAssociationRefresh", + "http":{ + "method":"POST", + "requestUri":"/account-associations/{AccountAssociationId}/refresh", + "responseCode":200 + }, + "input":{"shape":"StartAccountAssociationRefreshRequest"}, + "output":{"shape":"StartAccountAssociationRefreshResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Initiates a refresh of an existing account association to update its authorization and connection status.

        " + }, + "StartDeviceDiscovery":{ "name":"StartDeviceDiscovery", "http":{ "method":"POST", @@ -992,7 +1367,101 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        During user-guided setup, this is used to start device discovery. The authentication material (install code) is passed as a message to the controller telling it to start the discovery.

        " + "documentation":"

        This API is used to start device discovery for hub-connected and third-party-connected devices. The authentication material (install code) is passed as a message to the controller telling it to start the discovery.

        " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Add tags for the specified resource.

        ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Remove tags for the specified resource.

        ", + "idempotent":true + }, + "UpdateAccountAssociation":{ + "name":"UpdateAccountAssociation", + "http":{ + "method":"PUT", + "requestUri":"/account-associations/{AccountAssociationId}", + "responseCode":204 + }, + "input":{"shape":"UpdateAccountAssociationRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Updates the properties of an existing account association.

        ", + "idempotent":true + }, + "UpdateCloudConnector":{ + "name":"UpdateCloudConnector", + "http":{ + "method":"PUT", + "requestUri":"/cloud-connectors/{Identifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateCloudConnectorRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Update an existing cloud connector.

        ", + "idempotent":true + }, + "UpdateConnectorDestination":{ + "name":"UpdateConnectorDestination", + "http":{ + "method":"PUT", + "requestUri":"/connector-destinations/{Identifier}", + "responseCode":204 + }, + "input":{"shape":"UpdateConnectorDestinationRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Updates the properties of an existing connector destination.

        ", + "idempotent":true }, "UpdateDestination":{ "name":"UpdateDestination", @@ -1140,6 +1609,78 @@ }, "exception":true }, + "AccountAssociationArn":{ + "type":"string", + "max":1011, + "min":67, + "pattern":"arn:aws:iotmanagedintegrations:[0-9a-zA-Z-]+:[0-9]+:account-association/[0-9a-zA-Z]+" + }, + "AccountAssociationDescription":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[A-Za-z0-9-_ ]+" + }, + "AccountAssociationErrorMessage":{ + "type":"string", + "max":4096, + "min":1, + "pattern":"[A-Za-z0-9-_ ]+" + }, + "AccountAssociationId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z]+" + }, + "AccountAssociationItem":{ + "type":"structure", + "required":[ + "AccountAssociationId", + "AssociationState" + ], + "members":{ + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association.

        " + }, + "AssociationState":{ + "shape":"AssociationState", + "documentation":"

        The current state of the account association, indicating its status in the association lifecycle.

        " + }, + "ErrorMessage":{ + "shape":"AccountAssociationErrorMessage", + "documentation":"

        The error message explaining any issues with the account association, if applicable.

        " + }, + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination associated with this account association.

        " + }, + "Name":{ + "shape":"AccountAssociationName", + "documentation":"

        The name of the account association.

        " + }, + "Description":{ + "shape":"AccountAssociationDescription", + "documentation":"

        A description of the account association.

        " + }, + "Arn":{ + "shape":"AccountAssociationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the account association.

        " + } + }, + "documentation":"

        Structure containing information about an account association, including its identifier, state, and related metadata.

        " + }, + "AccountAssociationListDefinition":{ + "type":"list", + "member":{"shape":"AccountAssociationItem"} + }, + "AccountAssociationName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z0-9-_ ]+" + }, "ActionName":{ "type":"string", "max":128, @@ -1162,6 +1703,16 @@ "min":5, "pattern":"([A-Za-z0-9!#$%&()*\\+\\-;<=>?@^_`{|}~])+" }, + "AssociationState":{ + "type":"string", + "enum":[ + "ASSOCIATION_IN_PROGRESS", + "ASSOCIATION_FAILED", + "ASSOCIATION_SUCCEEDED", + "ASSOCIATION_DELETING", + "REFRESH_TOKEN_EXPIRED" + ] + }, "AttributeName":{ "type":"string", "max":128, @@ -1174,21 +1725,53 @@ "min":0, "pattern":".*[a-zA-Z0-9_.,@/:#-]*.*" }, + "AuthConfig":{ + "type":"structure", + "members":{ + "oAuth":{ + "shape":"OAuthConfig", + "documentation":"

        The OAuth configuration settings used for authentication with the third-party service.

        " + } + }, + "documentation":"

        The authentication configuration details for a connector destination, including OAuth settings and other authentication parameters.

        " + }, + "AuthConfigUpdate":{ + "type":"structure", + "members":{ + "oAuthUpdate":{ + "shape":"OAuthUpdate", + "documentation":"

        The updated OAuth configuration settings for the authentication configuration.

        " + } + }, + "documentation":"

        The updated authentication configuration details for a connector destination.

        " + }, "AuthMaterialString":{ "type":"string", "max":512, "min":1, - "pattern":"[0-9A-Za-z!#$%&()*\\+\\-;<=>?@^_`{|}~\\/: ]+", + "pattern":"[0-9A-Za-z!#$%&()*\\+\\-;<=>?@^_`{|}~\\/: {},\\\\\"]+", "sensitive":true }, "AuthMaterialType":{ "type":"string", "enum":[ + "CUSTOM_PROTOCOL_QR_BAR_CODE", "WIFI_SETUP_QR_BAR_CODE", "ZWAVE_QR_BAR_CODE", - "ZIGBEE_QR_BAR_CODE" + "ZIGBEE_QR_BAR_CODE", + "DISCOVERED_DEVICE" ] }, + "AuthType":{ + "type":"string", + "enum":["OAUTH"] + }, + "AuthUrl":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}([-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)" + }, "BaseRatePerMinute":{ "type":"integer", "box":true, @@ -1300,7 +1883,7 @@ "CapabilityReportCapabilities":{ "type":"list", "member":{"shape":"CapabilityReportCapability"}, - "max":50, + "max":40, "min":0 }, "CapabilityReportCapability":{ @@ -1367,7 +1950,7 @@ "CapabilityReportEndpoints":{ "type":"list", "member":{"shape":"CapabilityReportEndpoint"}, - "max":50, + "max":40, "min":0 }, "CapabilityReportEvents":{ @@ -1388,6 +1971,45 @@ "min":1, "pattern":"1\\.0\\.0" }, + "CapabilitySchemaItem":{ + "type":"structure", + "required":[ + "Format", + "CapabilityId", + "ExtrinsicId", + "ExtrinsicVersion", + "Schema" + ], + "members":{ + "Format":{ + "shape":"SchemaVersionFormat", + "documentation":"

        The format of the capability schema, which defines how the schema is structured and interpreted.

        " + }, + "CapabilityId":{ + "shape":"SchemaVersionedId", + "documentation":"

        The unique identifier of the capability defined in the schema.

        " + }, + "ExtrinsicId":{ + "shape":"ExtrinsicSchemaId", + "documentation":"

        The external identifier for the capability, used when referencing the capability outside of the AWS ecosystem.

        " + }, + "ExtrinsicVersion":{ + "shape":"MatterCapabilityReportClusterRevisionId", + "documentation":"

        The version of the external capability definition, used to track compatibility with external systems.

        " + }, + "Schema":{ + "shape":"ValidationSchema", + "documentation":"

        The actual schema definition that describes the capability's properties, actions, and events.

        " + } + }, + "documentation":"

        Structure representing a capability schema item that defines the functionality and features supported by a managed thing.

        " + }, + "CapabilitySchemas":{ + "type":"list", + "member":{"shape":"CapabilitySchemaItem"}, + "max":40, + "min":0 + }, "CapabilityVersion":{ "type":"string", "max":64, @@ -1414,6 +2036,31 @@ "min":1, "pattern":"[a-zA-Z0-9=_-]+" }, + "CloudConnectorDescription":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[0-9A-Za-z_\\- ]+" + }, + "CloudConnectorId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9-_]+" + }, + "CloudConnectorType":{ + "type":"string", + "enum":[ + "LISTED", + "UNLISTED" + ] + }, + "ClusterId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"0[xX][0-9a-fA-F]+$|^[0-9]+" + }, "CommandCapabilities":{ "type":"list", "member":{"shape":"CommandCapability"}, @@ -1540,10 +2187,57 @@ "ConnectivityTimestamp":{"type":"timestamp"}, "ConnectorAssociationId":{ "type":"string", + "deprecated":true, + "deprecatedMessage":"ConnectorAssociationId is deprecated", + "deprecatedSince":"2025-06-25", "max":64, "min":1, "pattern":"[0-9a-zA-Z]+" }, + "ConnectorDestinationDescription":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[0-9A-Za-z_\\- ]+" + }, + "ConnectorDestinationId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9-_]+" + }, + "ConnectorDestinationListDefinition":{ + "type":"list", + "member":{"shape":"ConnectorDestinationSummary"} + }, + "ConnectorDestinationName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z0-9-_ ]+" + }, + "ConnectorDestinationSummary":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ConnectorDestinationName", + "documentation":"

        The display name of the connector destination.

        " + }, + "Description":{ + "shape":"ConnectorDestinationDescription", + "documentation":"

        A description of the connector destination.

        " + }, + "CloudConnectorId":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the cloud connector associated with this connector destination.

        " + }, + "Id":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The unique identifier of the connector destination.

        " + } + }, + "documentation":"

        Structure containing summary information about a connector destination, which defines how a cloud-to-cloud connector connects to a customer's AWS account.

        " + }, "ConnectorDeviceId":{ "type":"string", "max":256, @@ -1551,27 +2245,252 @@ "pattern":"[a-zA-Z0-9_.,@-]+", "sensitive":true }, - "ConnectorPolicyId":{ + "ConnectorDeviceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\p{L}\\p{N} ._-]+", + "sensitive":true + }, + "ConnectorEventMessage":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\sa-zA-Z0-9_.,@-]+", + "sensitive":true + }, + "ConnectorEventOperation":{ + "type":"string", + "enum":[ + "DEVICE_COMMAND_RESPONSE", + "DEVICE_DISCOVERY", + "DEVICE_EVENT", + "DEVICE_COMMAND_REQUEST" + ] + }, + "ConnectorEventOperationVersion":{ + "type":"string", + "max":6, + "min":1, + "pattern":"[0-9.]+", + "sensitive":true + }, + "ConnectorEventStatusCode":{ + "type":"integer", + "box":true, + "max":550, + "min":100, + "sensitive":true + }, + "ConnectorId":{ "type":"string", "max":64, "min":1, "pattern":"[A-Za-z0-9-_]+" }, - "CreateCredentialLockerRequest":{ + "ConnectorItem":{ "type":"structure", + "required":[ + "Name", + "EndpointConfig" + ], "members":{ "Name":{ - "shape":"CredentialLockerName", - "documentation":"

        The name of the credential locker.

        " + "shape":"DisplayName", + "documentation":"

        The display name of the C2C connector.

        " + }, + "EndpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

        The configuration details for the cloud connector endpoint, including connection parameters and authentication requirements.

        " + }, + "Description":{ + "shape":"CloudConnectorDescription", + "documentation":"

        A description of the C2C connector.

        " + }, + "EndpointType":{ + "shape":"EndpointType", + "documentation":"

        The type of endpoint used for the C2C connector.

        " + }, + "Id":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the C2C connector.

        " }, + "Type":{ + "shape":"CloudConnectorType", + "documentation":"

        The type of cloud connector created.

        " + } + }, + "documentation":"

        Structure describing a connector.

        " + }, + "ConnectorList":{ + "type":"list", + "member":{"shape":"ConnectorItem"} + }, + "ConnectorPolicyId":{ + "type":"string", + "deprecated":true, + "deprecatedMessage":"ConnectorPolicyId is deprecated", + "deprecatedSince":"2025-06-25", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9-_]+" + }, + "CreateAccountAssociationRequest":{ + "type":"structure", + "required":["ConnectorDestinationId"], + "members":{ "ClientToken":{ "shape":"ClientToken", "documentation":"

        An idempotency token. If you retry a request that completed successfully initially using the same client token and parameters, then the retry attempt will succeed without performing any further actions.

        ", "idempotencyToken":true }, + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination.

        " + }, + "Name":{ + "shape":"AccountAssociationName", + "documentation":"

        The name of the destination for the new account association.

        " + }, + "Description":{ + "shape":"AccountAssociationDescription", + "documentation":"

        A description of the account association request.

        " + }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the credential locker.

        " + "documentation":"

        A set of key/value pairs that are used to manage the account association.

        " + } + } + }, + "CreateAccountAssociationResponse":{ + "type":"structure", + "required":[ + "OAuthAuthorizationUrl", + "AccountAssociationId", + "AssociationState" + ], + "members":{ + "OAuthAuthorizationUrl":{ + "shape":"OAuthAuthorizationUrl", + "documentation":"

        Third-party IoT platform OAuth authorization server URL backed with all the required parameters to perform end-user authentication.

        " + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier for the account association request.

        " + }, + "AssociationState":{ + "shape":"AssociationState", + "documentation":"

        The current state of the account association request.

        " + }, + "Arn":{ + "shape":"AccountAssociationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the account association.

        " + } + } + }, + "CreateCloudConnectorRequest":{ + "type":"structure", + "required":[ + "Name", + "EndpointConfig" + ], + "members":{ + "Name":{ + "shape":"DisplayName", + "documentation":"

        The display name of the C2C connector.

        " + }, + "EndpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

        The configuration details for the cloud connector endpoint, including connection parameters and authentication requirements.

        " + }, + "Description":{ + "shape":"CloudConnectorDescription", + "documentation":"

        A description of the C2C connector.

        " + }, + "EndpointType":{ + "shape":"EndpointType", + "documentation":"

        The type of endpoint used for the cloud connector, which defines how the connector communicates with external services.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        An idempotency token. If you retry a request that completed successfully initially using the same client token and parameters, then the retry attempt will succeed without performing any further actions.

        ", + "idempotencyToken":true + } + } + }, + "CreateCloudConnectorResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"CloudConnectorId", + "documentation":"

        The unique identifier assigned to the newly created cloud connector.

        " + } + } + }, + "CreateConnectorDestinationRequest":{ + "type":"structure", + "required":[ + "CloudConnectorId", + "AuthType", + "AuthConfig", + "SecretsManager" + ], + "members":{ + "Name":{ + "shape":"ConnectorDestinationName", + "documentation":"

        The display name of the connector destination.

        " + }, + "Description":{ + "shape":"ConnectorDestinationDescription", + "documentation":"

        A description of the connector destination.

        " + }, + "CloudConnectorId":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the C2C connector.

        " + }, + "AuthType":{ + "shape":"AuthType", + "documentation":"

        The authentication type used for the connector destination, which determines how credentials and access are managed.

        " + }, + "AuthConfig":{ + "shape":"AuthConfig", + "documentation":"

        The authentication configuration details for the connector destination, including OAuth settings and other authentication parameters.

        " + }, + "SecretsManager":{ + "shape":"SecretsManager", + "documentation":"

        The AWS Secrets Manager configuration used to securely store and manage sensitive information for the connector destination.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        An idempotency token. If you retry a request that completed successfully initially using the same client token and parameters, then the retry attempt will succeed without performing any further actions.

        ", + "idempotencyToken":true + } + } + }, + "CreateConnectorDestinationResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the C2C connector destination creation request.

        " + } + } + }, + "CreateCredentialLockerRequest":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"CredentialLockerName", + "documentation":"

        The name of the credential locker.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        An idempotency token. If you retry a request that completed successfully initially using the same client token and parameters, then the retry attempt will succeed without performing any further actions.

        ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

        A set of key/value pairs that are used to manage the credential locker.

        " } } }, @@ -1628,7 +2547,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the destination.

        " + "documentation":"

        A set of key/value pairs that are used to manage the destination.

        ", + "deprecated":true, + "deprecatedMessage":"Tags have been deprecated from this api", + "deprecatedSince":"06-25-2025" } } }, @@ -1724,6 +2646,10 @@ "shape":"CapabilityReport", "documentation":"

        A report of the capabilities for the managed thing.

        " }, + "CapabilitySchemas":{ + "shape":"CapabilitySchemas", + "documentation":"

        The capability schemas that define the functionality and features supported by the managed thing, including device capabilities and their associated properties.

        " + }, "Capabilities":{ "shape":"Capabilities", "documentation":"

        The capabilities of the device such as light bulb.

        " @@ -1743,7 +2669,7 @@ }, "MetaData":{ "shape":"MetaData", - "documentation":"

        The metadata for the managed thing.

        " + "documentation":"

        The metadata for the managed thing.

        The managedThing metadata parameter is used for associating attributes with a managedThing that can be used for grouping over-the-air (OTA) tasks. Name value pairs in metadata can be used in the OtaTargetQueryString parameter for the CreateOtaTask API operation.

        " } } }, @@ -1786,7 +2712,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the notification configuration.

        " + "documentation":"

        A set of key/value pairs that are used to manage the notification configuration.

        ", + "deprecated":true, + "deprecatedMessage":"Tags has been deprecated from this api", + "deprecatedSince":"06-25-2025" } } }, @@ -2002,6 +2931,61 @@ }, "documentation":"

        Structure describing one Credential Locker.

        " }, + "CustomProtocolDetail":{ + "type":"map", + "key":{"shape":"CustomProtocolDetailKey"}, + "value":{"shape":"CustomProtocolDetailValue"}, + "max":50, + "min":0 + }, + "CustomProtocolDetailKey":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9 _.-]+" + }, + "CustomProtocolDetailValue":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[a-zA-Z0-9 _.{}:\"-]+" + }, + "DeleteAccountAssociationRequest":{ + "type":"structure", + "required":["AccountAssociationId"], + "members":{ + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association to be deleted.

        ", + "location":"uri", + "locationName":"AccountAssociationId" + } + } + }, + "DeleteCloudConnectorRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the cloud connector.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "DeleteConnectorDestinationRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, "DeleteCredentialLockerRequest":{ "type":"structure", "required":["Identifier"], @@ -2119,6 +3103,24 @@ "type":"string", "enum":["KINESIS"] }, + "DeregisterAccountAssociationRequest":{ + "type":"structure", + "required":[ + "ManagedThingId", + "AccountAssociationId" + ], + "members":{ + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing to be deregistered from the account association.

        " + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association to be deregistered.

        " + } + }, + "documentation":"

        Request to deregister a managed thing from an account association.

        " + }, "DestinationCreatedAt":{"type":"timestamp"}, "DestinationDescription":{ "type":"string", @@ -2163,6 +3165,36 @@ "documentation":"

        Structure describing a destination for IoT managed integrations to deliver notifications for a device.

        " }, "DestinationUpdatedAt":{"type":"timestamp"}, + "Device":{ + "type":"structure", + "required":[ + "ConnectorDeviceId", + "CapabilityReport" + ], + "members":{ + "ConnectorDeviceId":{ + "shape":"ConnectorDeviceId", + "documentation":"

        The device id as defined by the connector.

        This parameter is used for cloud-to-cloud devices only.

        " + }, + "ConnectorDeviceName":{ + "shape":"ConnectorDeviceName", + "documentation":"

        The name of the device as defined by the connector.

        " + }, + "CapabilityReport":{ + "shape":"MatterCapabilityReport", + "documentation":"

        The capability report for the device.

        " + }, + "CapabilitySchemas":{ + "shape":"CapabilitySchemas", + "documentation":"

        Report of all capabilities supported by the device.

        " + }, + "DeviceMetadata":{ + "shape":"DeviceMetadata", + "documentation":"

        The metadata attributes for a device.

        " + } + }, + "documentation":"

        Describe the device using the relevant metadata and supported clusters for device discovery.

        " + }, "DeviceDiscoveryArn":{ "type":"string", "pattern":"arn:aws:iotmanagedintegrations:[0-9a-zA-Z-]+:[0-9]+:device-discovery/[0-9a-zA-Z]+" @@ -2173,6 +3205,10 @@ "min":1, "pattern":"[A-Za-z0-9]+" }, + "DeviceDiscoveryListDefinition":{ + "type":"list", + "member":{"shape":"DeviceDiscoverySummary"} + }, "DeviceDiscoveryStatus":{ "type":"string", "enum":[ @@ -2182,6 +3218,30 @@ "TIMED_OUT" ] }, + "DeviceDiscoverySummary":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"DeviceDiscoveryId", + "documentation":"

        The unique identifier of the device discovery job.

        " + }, + "DiscoveryType":{ + "shape":"DiscoveryType", + "documentation":"

        The type of discovery process used to find devices.

        " + }, + "Status":{ + "shape":"DeviceDiscoveryStatus", + "documentation":"

        The current status of the device discovery job.

        " + } + }, + "documentation":"

        Structure containing summary information about a device discovery job, including its identifier, type, and status.

        " + }, + "DeviceMetadata":{ + "type":"structure", + "members":{ + }, + "document":true + }, "DeviceSpecificKey":{ "type":"string", "max":128, @@ -2195,12 +3255,20 @@ "min":0, "pattern":"[a-zA-Z0-9=_. ,@\\+\\-/]+" }, + "DeviceTypeList":{ + "type":"list", + "member":{"shape":"DeviceType"} + }, "DeviceTypes":{ "type":"list", "member":{"shape":"DeviceType"}, "max":50, "min":0 }, + "Devices":{ + "type":"list", + "member":{"shape":"Device"} + }, "DisconnectReasonValue":{ "type":"string", "enum":[ @@ -2220,6 +3288,53 @@ "NONE" ] }, + "DiscoveredAt":{"type":"timestamp"}, + "DiscoveredDeviceListDefinition":{ + "type":"list", + "member":{"shape":"DiscoveredDeviceSummary"} + }, + "DiscoveredDeviceSummary":{ + "type":"structure", + "members":{ + "ConnectorDeviceId":{ + "shape":"ConnectorDeviceId", + "documentation":"

        The third-party device identifier as defined by the connector. This identifier must not contain personal identifiable information (PII).

        " + }, + "ConnectorDeviceName":{ + "shape":"ConnectorDeviceName", + "documentation":"

        The name of the device as defined by the connector or third-party system.

        " + }, + "DeviceTypes":{ + "shape":"DeviceTypeList", + "documentation":"

        The list of device types or categories that the discovered device belongs to.

        " + }, + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing created for this discovered device, if one exists.

        " + }, + "Modification":{ + "shape":"DiscoveryModification", + "documentation":"

        The status of the discovered device, indicating whether it has been added, removed, or modified since the last discovery.

        " + }, + "DiscoveredAt":{ + "shape":"DiscoveredAt", + "documentation":"

        The timestamp indicating when the device was discovered.

        " + }, + "Brand":{ + "shape":"Brand", + "documentation":"

        The brand of the discovered device.

        " + }, + "Model":{ + "shape":"Model", + "documentation":"

        The model of the discovered device.

        " + }, + "AuthenticationMaterial":{ + "shape":"AuthMaterialString", + "documentation":"

        The authentication material required for connecting to the discovered device, such as credentials or tokens.

        " + } + }, + "documentation":"

        Structure containing summary information about a device discovered during a device discovery job.

        " + }, "DiscoveryAuthMaterialString":{ "type":"string", "max":64, @@ -2232,15 +3347,30 @@ "enum":["ZWAVE_INSTALL_CODE"] }, "DiscoveryFinishedAt":{"type":"timestamp"}, + "DiscoveryModification":{ + "type":"string", + "enum":[ + "DISCOVERED", + "UPDATED", + "NO_CHANGE" + ] + }, "DiscoveryStartedAt":{"type":"timestamp"}, "DiscoveryType":{ "type":"string", "enum":[ "ZWAVE", "ZIGBEE", - "CLOUD" + "CLOUD", + "CUSTOM" ] }, + "DisplayName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z0-9-_ ]+" + }, "DurationInMinutes":{ "type":"integer", "box":true, @@ -2261,13 +3391,35 @@ "min":1, "pattern":"[A-Za-z0-9._@-]+" }, + "EndpointConfig":{ + "type":"structure", + "members":{ + "lambda":{ + "shape":"LambdaConfig", + "documentation":"

        The Lambda function configuration for the endpoint, used when the endpoint communicates through an AWS Lambda function.

        " + } + }, + "documentation":"

        The configuration details for an endpoint, which defines how to connect to and communicate with external services.

        " + }, "EndpointId":{ "type":"string", "max":64, "min":1, "pattern":"[0-9a-zA-Z]+" }, + "EndpointSemanticTag":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[0-9a-zA-Z._-]+" + }, + "EndpointType":{ + "type":"string", + "enum":["LAMBDA"] + }, "ErrorMessage":{"type":"string"}, + "ErrorResourceId":{"type":"string"}, + "ErrorResourceType":{"type":"string"}, "EventLogConfigurationListDefinition":{ "type":"list", "member":{"shape":"EventLogConfigurationSummary"} @@ -2305,11 +3457,13 @@ "enum":[ "DEVICE_COMMAND", "DEVICE_COMMAND_REQUEST", + "DEVICE_DISCOVERY_STATUS", "DEVICE_EVENT", "DEVICE_LIFE_CYCLE", "DEVICE_STATE", "DEVICE_OTA", "CONNECTOR_ASSOCIATION", + "ACCOUNT_ASSOCIATION", "CONNECTOR_ERROR_REPORT" ] }, @@ -2335,59 +3489,217 @@ }, "documentation":"

        Structure representing exponential rate of rollout for an over-the-air (OTA) task.

        " }, - "GetCredentialLockerRequest":{ + "ExtrinsicSchemaId":{ + "type":"string", + "max":10, + "min":1, + "pattern":"0[xX][0-9a-fA-F]+$|^[0-9]+" + }, + "GetAccountAssociationRequest":{ "type":"structure", - "required":["Identifier"], + "required":["AccountAssociationId"], "members":{ - "Identifier":{ - "shape":"CredentialLockerId", - "documentation":"

        The identifier of the credential locker.

        ", + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association to retrieve.

        ", "location":"uri", - "locationName":"Identifier" + "locationName":"AccountAssociationId" } } }, - "GetCredentialLockerResponse":{ + "GetAccountAssociationResponse":{ "type":"structure", + "required":[ + "AccountAssociationId", + "AssociationState", + "OAuthAuthorizationUrl" + ], "members":{ - "Id":{ - "shape":"CredentialLockerId", - "documentation":"

        The identifier of the credential locker.

        " + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the retrieved account association.

        " }, - "Arn":{ - "shape":"CredentialLockerArn", - "documentation":"

        The Amazon Resource Name (ARN) of the credential locker.

        " + "AssociationState":{ + "shape":"AssociationState", + "documentation":"

        The current status of the account association.

        " + }, + "ErrorMessage":{ + "shape":"AccountAssociationErrorMessage", + "documentation":"

        The error message explaining the current account association error.

        " + }, + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination associated with this account association.

        " }, "Name":{ - "shape":"CredentialLockerName", - "documentation":"

        The name of the credential locker.

        " + "shape":"AccountAssociationName", + "documentation":"

        The name of the account association.

        " }, - "CreatedAt":{ - "shape":"CredentialLockerCreatedAt", - "documentation":"

        The timestamp value of when the credential locker request occurred.

        " + "Description":{ + "shape":"AccountAssociationDescription", + "documentation":"

        The description of the account association.

        " + }, + "Arn":{ + "shape":"AccountAssociationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the account association.

        " + }, + "OAuthAuthorizationUrl":{ + "shape":"OAuthAuthorizationUrl", + "documentation":"

        Third-party IoT platform OAuth authorization server URL backed with all the required parameters to perform end-user authentication.

        " }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the credential locker.

        " + "documentation":"

        A set of key/value pairs that are used to manage the account association.

        " } } }, - "GetCustomEndpointRequest":{ - "type":"structure", - "members":{ - } - }, - "GetCustomEndpointResponse":{ + "GetCloudConnectorRequest":{ "type":"structure", - "required":["EndpointAddress"], + "required":["Identifier"], "members":{ - "EndpointAddress":{ - "shape":"EndpointAddress", - "documentation":"

        The IoT managed integrations dedicated, custom endpoint for the device to route traffic through.

        " + "Identifier":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the C2C connector.

        ", + "location":"uri", + "locationName":"Identifier" } } }, - "GetDefaultEncryptionConfigurationRequest":{ + "GetCloudConnectorResponse":{ + "type":"structure", + "required":[ + "Name", + "EndpointConfig" + ], + "members":{ + "Name":{ + "shape":"DisplayName", + "documentation":"

        The display name of the C2C connector.

        " + }, + "EndpointConfig":{ + "shape":"EndpointConfig", + "documentation":"

        The configuration details for the cloud connector endpoint, including connection parameters and authentication requirements.

        " + }, + "Description":{ + "shape":"CloudConnectorDescription", + "documentation":"

        A description of the C2C connector.

        " + }, + "EndpointType":{ + "shape":"EndpointType", + "documentation":"

        The type of endpoint used for the cloud connector, which defines how the connector communicates with external services.

        " + }, + "Id":{ + "shape":"CloudConnectorId", + "documentation":"

        The unique identifier of the cloud connector.

        " + }, + "Type":{ + "shape":"CloudConnectorType", + "documentation":"

        The type of cloud connector created.

        " + } + } + }, + "GetConnectorDestinationRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the C2C connector destination.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "GetConnectorDestinationResponse":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"ConnectorDestinationName", + "documentation":"

        The display name of the connector destination.

        " + }, + "Description":{ + "shape":"ConnectorDestinationDescription", + "documentation":"

        A description of the connector destination.

        " + }, + "CloudConnectorId":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the C2C connector.

        " + }, + "Id":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The unique identifier of the connector destination.

        " + }, + "AuthType":{ + "shape":"AuthType", + "documentation":"

        The authentication type used for the connector destination, which determines how credentials and access are managed.

        " + }, + "AuthConfig":{ + "shape":"AuthConfig", + "documentation":"

        The authentication configuration details for the connector destination, including OAuth settings and other authentication parameters.

        " + }, + "SecretsManager":{ + "shape":"SecretsManager", + "documentation":"

        The AWS Secrets Manager configuration used to securely store and manage sensitive information for the connector destination.

        " + }, + "OAuthCompleteRedirectUrl":{ + "shape":"OAuthCompleteRedirectUrl", + "documentation":"

        The URL where users are redirected after completing the OAuth authorization process for the connector destination.

        " + } + } + }, + "GetCredentialLockerRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"CredentialLockerId", + "documentation":"

        The identifier of the credential locker.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "GetCredentialLockerResponse":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"CredentialLockerId", + "documentation":"

        The identifier of the credential locker.

        " + }, + "Arn":{ + "shape":"CredentialLockerArn", + "documentation":"

        The Amazon Resource Name (ARN) of the credential locker.

        " + }, + "Name":{ + "shape":"CredentialLockerName", + "documentation":"

        The name of the credential locker.

        " + }, + "CreatedAt":{ + "shape":"CredentialLockerCreatedAt", + "documentation":"

        The timestamp value of when the credential locker request occurred.

        " + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

        A set of key/value pairs that are used to manage the credential locker.

        " + } + } + }, + "GetCustomEndpointRequest":{ + "type":"structure", + "members":{ + } + }, + "GetCustomEndpointResponse":{ + "type":"structure", + "required":["EndpointAddress"], + "members":{ + "EndpointAddress":{ + "shape":"EndpointAddress", + "documentation":"

        The IoT managed integrations dedicated, custom endpoint for the device to route traffic through.

        " + } + } + }, + "GetDefaultEncryptionConfigurationRequest":{ "type":"structure", "members":{ } @@ -2458,7 +3770,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the customer-managed destination.

        " + "documentation":"

        A set of key/value pairs that are used to manage the customer-managed destination.

        ", + "deprecated":true, + "deprecatedMessage":"Tags has been deprecated from this api", + "deprecatedSince":"06-25-2025" } } }, @@ -2510,7 +3825,14 @@ }, "ConnectorAssociationId":{ "shape":"ConnectorAssociationId", - "documentation":"

        The ID tracking the current discovery process for one connector association.

        " + "documentation":"

        The ID tracking the current discovery process for one connector association.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorAssociationId has been deprecated", + "deprecatedSince":"2025-06-25" + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association used for the device discovery.

        " }, "FinishedAt":{ "shape":"DiscoveryFinishedAt", @@ -2518,7 +3840,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the device discovery request.

        " + "documentation":"

        A set of key/value pairs that are used to manage the device discovery request.

        ", + "deprecated":true, + "deprecatedMessage":"Tags have been deprecated from this api", + "deprecatedSince":"06-25-2025" } } }, @@ -2729,7 +4054,14 @@ }, "ConnectorPolicyId":{ "shape":"ConnectorPolicyId", - "documentation":"

        The id of the connector policy.

        This parameter is used for cloud-to-cloud devices only.

        " + "documentation":"

        The id of the connector policy.

        This parameter is used for cloud-to-cloud devices only.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorPolicyId is deprecated", + "deprecatedSince":"2025-06-25" + }, + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination associated with this managed thing.

        " }, "ConnectorDeviceId":{ "shape":"ConnectorDeviceId", @@ -2832,7 +4164,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the notification configuration.

        " + "documentation":"

        A set of key/value pairs that are used to manage the notification configuration.

        ", + "deprecated":true, + "deprecatedMessage":"Tags has been deprecated for this api", + "deprecatedSince":"06-25-2025" } } }, @@ -2945,6 +4280,10 @@ "Status":{ "shape":"OtaStatus", "documentation":"

        The status of the over-the-air (OTA) task.

        " + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

        A set of key/value pairs that are used to manage the over-the-air (OTA) task.

        " } } }, @@ -3130,13 +4469,172 @@ "pattern":"[0-9]+", "sensitive":true }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

        The request is not valid.

        ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IoTManagedIntegrationsResourceARN":{ + "type":"string", + "max":200, + "min":1, + "pattern":"arn:aws:iotmanagedintegrations:[0-9a-zA-Z-]+:[0-9]+:(managed-thing|provisioning-profile|ota-task|credential-locker|account-association)/[0-9a-zA-Z]+" + }, "KmsKeyArn":{ "type":"string", "max":200, "min":1, "pattern":"arn:aws:kms:[0-9a-zA-Z-]+:[0-9]+:key/[0-9a-zA-Z-]+" }, + "LambdaArn":{ + "type":"string", + "pattern":"(arn:aws:lambda:[0-9a-zA-Z-]+:[0-9]+:function:)?([a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?)" + }, + "LambdaConfig":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"LambdaArn", + "documentation":"

        The Amazon Resource Name (ARN) of the Lambda function used as an endpoint.

        " + } + }, + "documentation":"

        Configuration details for an AWS Lambda function used as an endpoint for a cloud connector.

        " + }, "LastUpdatedAt":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

        The request exceeds a service limit or quota. Adjust your request parameters and try again.

        ", + "error":{ + "httpStatusCode":410, + "senderFault":true + }, + "exception":true + }, + "ListAccountAssociationsRequest":{ + "type":"structure", + "members":{ + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination to filter account associations by.

        ", + "location":"querystring", + "locationName":"ConnectorDestinationId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of account associations to return in a single response.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListAccountAssociationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"AccountAssociationListDefinition", + "documentation":"

        The list of account associations that match the specified criteria.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results when there are more account associations than can be returned in a single response.

        " + } + } + }, + "ListCloudConnectorsRequest":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"CloudConnectorType", + "documentation":"

        The type of cloud connectors to filter by when listing available connectors.

        ", + "location":"querystring", + "locationName":"Type" + }, + "LambdaArn":{ + "shape":"LambdaArn", + "documentation":"

        The Amazon Resource Name (ARN) of the Lambda function to filter cloud connectors by.

        ", + "location":"querystring", + "locationName":"LambdaArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return at one time.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token that can be used to retrieve the next set of results.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListCloudConnectorsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ConnectorList", + "documentation":"

        The list of connectors.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token that can be used to retrieve the next set of results.

        " + } + } + }, + "ListConnectorDestinationsRequest":{ + "type":"structure", + "members":{ + "CloudConnectorId":{ + "shape":"CloudConnectorId", + "documentation":"

        The identifier of the cloud connector to filter connector destinations by.

        ", + "location":"querystring", + "locationName":"CloudConnectorId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of connector destinations to return in a single response.

        ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListConnectorDestinationsResponse":{ + "type":"structure", + "members":{ + "ConnectorDestinationList":{ + "shape":"ConnectorDestinationListDefinition", + "documentation":"

        The list of connector destinations that match the specified criteria.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results when there are more connector destinations than can be returned in a single response.

        " + } + } + }, "ListCredentialLockersRequest":{ "type":"structure", "members":{ @@ -3197,6 +4695,85 @@ } } }, + "ListDeviceDiscoveriesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of device discovery jobs to return in a single response.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "TypeFilter":{ + "shape":"DiscoveryType", + "documentation":"

        The discovery type to filter device discovery jobs by.

        ", + "location":"querystring", + "locationName":"TypeFilter" + }, + "StatusFilter":{ + "shape":"DeviceDiscoveryStatus", + "documentation":"

        The status to filter device discovery jobs by.

        ", + "location":"querystring", + "locationName":"StatusFilter" + } + } + }, + "ListDeviceDiscoveriesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DeviceDiscoveryListDefinition", + "documentation":"

        The list of device discovery jobs that match the specified criteria.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results when there are more device discovery jobs than can be returned in a single response.

        " + } + } + }, + "ListDiscoveredDevicesRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"DeviceDiscoveryId", + "documentation":"

        The identifier of the device discovery job to list discovered devices for.

        ", + "location":"uri", + "locationName":"Identifier" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of discovered devices to return in a single response.

        ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListDiscoveredDevicesResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"DiscoveredDeviceListDefinition", + "documentation":"

        The list of discovered devices that match the specified criteria.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results when there are more discovered devices than can be returned in a single response.

        " + } + } + }, "ListEventLogConfigurationsRequest":{ "type":"structure", "members":{ @@ -3227,6 +4804,48 @@ } } }, + "ListManagedThingAccountAssociationsRequest":{ + "type":"structure", + "members":{ + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing to list account associations for.

        ", + "location":"querystring", + "locationName":"ManagedThingId" + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association to filter results by. When specified, only associations with this account association ID will be returned.

        ", + "location":"querystring", + "locationName":"AccountAssociationId" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of account associations to return in a single response.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        A token used for pagination of results.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListManagedThingAccountAssociationsResponse":{ + "type":"structure", + "members":{ + "Items":{ + "shape":"ManagedThingAssociationList", + "documentation":"

        The list of managed thing associations that match the specified criteria, including the managed thing ID and account association ID for each association.

        " + }, + "NextToken":{ + "shape":"String", + "documentation":"

        A token used for pagination of results when there are more account associations than can be returned in a single response.

        " + } + } + }, "ListManagedThingSchemasRequest":{ "type":"structure", "required":["Identifier"], @@ -3306,9 +4925,24 @@ "ConnectorPolicyIdFilter":{ "shape":"ConnectorPolicyId", "documentation":"

        Filter on a connector policy id for a managed thing.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorPolicyIdFilter is deprecated", + "deprecatedSince":"06-25-2025", "location":"querystring", "locationName":"ConnectorPolicyIdFilter" }, + "ConnectorDestinationIdFilter":{ + "shape":"ConnectorDestinationId", + "documentation":"

        Filter managed things by the connector destination ID they are associated with.

        ", + "location":"querystring", + "locationName":"ConnectorDestinationIdFilter" + }, + "ConnectorDeviceIdFilter":{ + "shape":"ConnectorDeviceId", + "documentation":"

        Filter managed things by the connector device ID they are associated with. When specified, only managed things with this connector device ID will be returned.

        ", + "location":"querystring", + "locationName":"ConnectorDeviceIdFilter" + }, "SerialNumberFilter":{ "shape":"SerialNumber", "documentation":"

        Filter on the serial number of the device.

        ", @@ -3566,6 +5200,27 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"IoTManagedIntegrationsResourceARN", + "documentation":"

        The ARN of the resource for which to list tags.

        ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

        A set of key/value pairs that are used to manage the resource.

        " + } + } + }, "LocalStoreFileRotationMaxBytes":{ "type":"integer", "box":true @@ -3602,6 +5257,24 @@ "min":32, "pattern":"arn:aws:iotmanagedintegrations:[0-9a-zA-Z-]+:[0-9]+:managed-thing/([0-9a-zA-Z:_-])+" }, + "ManagedThingAssociation":{ + "type":"structure", + "members":{ + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing in the association.

        " + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association in the association.

        " + } + }, + "documentation":"

        Structure representing an association between a managed thing and an account association, which connects a device to a third-party account.

        " + }, + "ManagedThingAssociationList":{ + "type":"list", + "member":{"shape":"ManagedThingAssociation"} + }, "ManagedThingId":{ "type":"string", "max":64, @@ -3663,7 +5336,14 @@ }, "ConnectorPolicyId":{ "shape":"ConnectorPolicyId", - "documentation":"

        The id of the connector policy.

        This parameter is used for cloud-to-cloud devices only.

        " + "documentation":"

        The id of the connector policy.

        This parameter is used for cloud-to-cloud devices only.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorPolicyId has been deprecated", + "deprecatedSince":"06-25-2025" + }, + "ConnectorDestinationId":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The identifier of the connector destination associated with this managed thing, if applicable.

        " }, "Model":{ "shape":"Model", @@ -3705,12 +5385,307 @@ "shape":"CreatedAt", "documentation":"

        The timestamp value of when the managed thing was last updated at.

        " }, - "ActivatedAt":{ - "shape":"SetupAt", - "documentation":"

        The timestampe value of when the managed thing was activated at.

        " + "ActivatedAt":{ + "shape":"SetupAt", + "documentation":"

        The timestamp value of when the managed thing was activated at.

        " + } + }, + "documentation":"

        Structure representing one managed thing.

        " + }, + "MatterAttributeId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"0[xX][0-9a-fA-F]+$|^[0-9]+" + }, + "MatterAttributes":{ + "type":"structure", + "members":{ + }, + "document":true, + "sensitive":true + }, + "MatterCapabilityReport":{ + "type":"structure", + "required":[ + "version", + "endpoints" + ], + "members":{ + "version":{ + "shape":"CapabilityReportVersion", + "documentation":"

        The version of the capability report.

        " + }, + "nodeId":{ + "shape":"NodeId", + "documentation":"

        The numeric identifier of the node.

        " + }, + "endpoints":{ + "shape":"MatterCapabilityReportEndpoints", + "documentation":"

        The endpoints used in the capability report.

        " + } + }, + "documentation":"

        Matter based capability report.

        " + }, + "MatterCapabilityReportAttribute":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MatterAttributeId", + "documentation":"

        The id of the Matter attribute.

        " + }, + "name":{ + "shape":"ActionName", + "documentation":"

        Name for the Amazon Web Services Matter capability report attribute.

        " + }, + "value":{ + "shape":"MatterCapabilityReportAttributeValue", + "documentation":"

        Value for the Amazon Web Services Matter capability report attribute.

        " + } + }, + "documentation":"

        Matter attribute used in capability report.

        " + }, + "MatterCapabilityReportAttributeValue":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "MatterCapabilityReportAttributes":{ + "type":"list", + "member":{"shape":"MatterCapabilityReportAttribute"}, + "max":100, + "min":0 + }, + "MatterCapabilityReportCluster":{ + "type":"structure", + "required":[ + "id", + "revision" + ], + "members":{ + "id":{ + "shape":"ClusterId", + "documentation":"

        The id of the Amazon Web Services Matter capability report cluster.

        " + }, + "revision":{ + "shape":"MatterCapabilityReportClusterRevisionId", + "documentation":"

        The id of the revision for the Amazon Web Services Matter capability report.

        " + }, + "publicId":{ + "shape":"SchemaVersionedId", + "documentation":"

        The id of the schema version.

        " + }, + "name":{ + "shape":"CapabilityName", + "documentation":"

        The capability name used in the Amazon Web Services Matter capability report.

        " + }, + "specVersion":{ + "shape":"SpecVersion", + "documentation":"

        The spec version used in the Amazon Web Services Matter capability report.

        " + }, + "attributes":{ + "shape":"MatterCapabilityReportAttributes", + "documentation":"

        The attributes of the Amazon Web Services Matter capability report.

        " + }, + "commands":{ + "shape":"MatterCapabilityReportCommands", + "documentation":"

        The commands used with the Amazon Web Services Matter capability report.

        " + }, + "events":{ + "shape":"MatterCapabilityReportEvents", + "documentation":"

        The events used with the Amazon Web Services Matter capability report.

        " + }, + "featureMap":{ + "shape":"MatterCapabilityReportFeatureMap", + "documentation":"

        32 bit-map used to indicate which features a cluster supports.

        " + }, + "generatedCommands":{ + "shape":"MatterCapabilityReportGeneratedCommands", + "documentation":"

        The generated commands used with the Amazon Web Services Matter capability report.

        " + }, + "fabricIndex":{ + "shape":"MatterCapabilityReportFabricIndex", + "documentation":"

        The fabric index for the Amazon Web Services Matter capability report.

        " + } + }, + "documentation":"

        Capability used in Matter capability report.

        " + }, + "MatterCapabilityReportClusterRevisionId":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "MatterCapabilityReportClusters":{ + "type":"list", + "member":{"shape":"MatterCapabilityReportCluster"}, + "max":50, + "min":0 + }, + "MatterCapabilityReportCommands":{ + "type":"list", + "member":{"shape":"MatterCommandId"}, + "max":100, + "min":0 + }, + "MatterCapabilityReportEndpoint":{ + "type":"structure", + "required":[ + "id", + "deviceTypes", + "clusters" + ], + "members":{ + "id":{ + "shape":"EndpointId", + "documentation":"

        The id of the Amazon Web Services Matter capability report endpoint.

        " + }, + "deviceTypes":{ + "shape":"DeviceTypes", + "documentation":"

        The type of device.

        " + }, + "clusters":{ + "shape":"MatterCapabilityReportClusters", + "documentation":"

        Matter clusters used in capability report.

        " + }, + "parts":{ + "shape":"MatterCapabilityReportEndpointParts", + "documentation":"

        Hierarchy of child endpoints contained in the given endpoint.

        " + }, + "semanticTags":{ + "shape":"MatterCapabilityReportEndpointSemanticTags", + "documentation":"

        Semantic information related to endpoint.

        " + }, + "clientClusters":{ + "shape":"MatterCapabilityReportEndpointClientClusters", + "documentation":"

        The client clusters present on the endpoint.

        " + } + }, + "documentation":"

        Matter endpoint used in capability report.

        " + }, + "MatterCapabilityReportEndpointClientClusters":{ + "type":"list", + "member":{"shape":"ClusterId"}, + "max":32, + "min":0 + }, + "MatterCapabilityReportEndpointParts":{ + "type":"list", + "member":{"shape":"EndpointId"}, + "max":32, + "min":0 + }, + "MatterCapabilityReportEndpointSemanticTags":{ + "type":"list", + "member":{"shape":"EndpointSemanticTag"}, + "max":32, + "min":0 + }, + "MatterCapabilityReportEndpoints":{ + "type":"list", + "member":{"shape":"MatterCapabilityReportEndpoint"}, + "max":50, + "min":0 + }, + "MatterCapabilityReportEvents":{ + "type":"list", + "member":{"shape":"MatterEventId"}, + "max":100, + "min":0 + }, + "MatterCapabilityReportFabricIndex":{ + "type":"integer", + "box":true, + "max":4096, + "min":0 + }, + "MatterCapabilityReportFeatureMap":{ + "type":"long", + "box":true, + "max":4294967295, + "min":0 + }, + "MatterCapabilityReportGeneratedCommands":{ + "type":"list", + "member":{"shape":"MatterCommandId"}, + "max":50, + "min":0 + }, + "MatterCluster":{ + "type":"structure", + "members":{ + "id":{ + "shape":"ClusterId", + "documentation":"

        The cluster id.

        " + }, + "attributes":{ + "shape":"MatterAttributes", + "documentation":"

        The Matter attributes.

        " + }, + "commands":{ + "shape":"MatterCommands", + "documentation":"

        Describe the Matter commands with the Matter command identifier mapped to the command fields.

        " + }, + "events":{ + "shape":"MatterEvents", + "documentation":"

        Describe the Matter events with the Matter event identifier mapped to the event fields.

        " + } + }, + "documentation":"

        Describe a Matter cluster with an id, and the relevant attributes, commands, and events.

        " + }, + "MatterClusters":{ + "type":"list", + "member":{"shape":"MatterCluster"}, + "max":5, + "min":1 + }, + "MatterCommandId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"0[xX][0-9a-fA-F]+$|^[0-9]+" + }, + "MatterCommands":{ + "type":"map", + "key":{"shape":"MatterCommandId"}, + "value":{"shape":"MatterFields"}, + "max":5, + "min":1 + }, + "MatterEndpoint":{ + "type":"structure", + "members":{ + "id":{ + "shape":"EndpointId", + "documentation":"

        The Matter endpoint id.

        " + }, + "clusters":{ + "shape":"MatterClusters", + "documentation":"

        A list of Matter clusters for a managed thing.

        " } }, - "documentation":"

        Structure representing one managed thing.

        " + "documentation":"

        Structure describing a managed thing.

        " + }, + "MatterEventId":{ + "type":"string", + "max":24, + "min":1, + "pattern":"0[xX][0-9a-fA-F]+$|^[0-9]+" + }, + "MatterEvents":{ + "type":"map", + "key":{"shape":"MatterEventId"}, + "value":{"shape":"MatterFields"}, + "max":5, + "min":1 + }, + "MatterFields":{ + "type":"structure", + "members":{ + }, + "document":true, + "sensitive":true }, "MaxResults":{ "type":"integer", @@ -3799,6 +5774,68 @@ "max":100, "min":1 }, + "OAuthAuthorizationUrl":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(https)://.*", + "sensitive":true + }, + "OAuthCompleteRedirectUrl":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(http|https)://.*" + }, + "OAuthConfig":{ + "type":"structure", + "required":[ + "authUrl", + "tokenUrl", + "tokenEndpointAuthenticationScheme" + ], + "members":{ + "authUrl":{ + "shape":"AuthUrl", + "documentation":"

        The authorization URL for the OAuth service, where users are directed to authenticate and authorize access.

        " + }, + "tokenUrl":{ + "shape":"TokenUrl", + "documentation":"

        The token URL for the OAuth service, where authorization codes are exchanged for access tokens.

        " + }, + "scope":{ + "shape":"String", + "documentation":"

        The OAuth scopes requested during authorization, which define the permissions granted to the application.

        " + }, + "tokenEndpointAuthenticationScheme":{ + "shape":"TokenEndpointAuthenticationScheme", + "documentation":"

        The authentication scheme used when requesting tokens from the token endpoint.

        " + }, + "oAuthCompleteRedirectUrl":{ + "shape":"String", + "documentation":"

        The URL where users are redirected after completing the OAuth authorization process.

        " + }, + "proactiveRefreshTokenRenewal":{ + "shape":"ProactiveRefreshTokenRenewal", + "documentation":"

        Configuration for proactively refreshing OAuth tokens before they expire.

        " + } + }, + "documentation":"

        Configuration details for OAuth authentication with a third-party service.

        " + }, + "OAuthUpdate":{ + "type":"structure", + "members":{ + "oAuthCompleteRedirectUrl":{ + "shape":"String", + "documentation":"

        The updated URL where users are redirected after completing the OAuth authorization process.

        " + }, + "proactiveRefreshTokenRenewal":{ + "shape":"ProactiveRefreshTokenRenewal", + "documentation":"

        Updated configuration for proactively refreshing OAuth tokens before they expire.

        " + } + }, + "documentation":"

        Structure containing updated OAuth configuration settings.

        " + }, "OtaDescription":{ "type":"string", "max":256, @@ -3843,7 +5880,7 @@ "OtaTaskArn":{ "type":"string", "max":1011, - "min":0, + "min":32, "pattern":"arn:aws:iotmanagedintegrations:[0-9a-zA-Z-]+:[0-9]+:ota-task/[0-9a-zA-Z]+" }, "OtaTaskConfigurationId":{ @@ -4058,6 +6095,25 @@ "min":1, "pattern":"[A-Za-z0-9]+" }, + "ProactiveRefreshTokenRenewal":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether proactive refresh token renewal is enabled.

        " + }, + "DaysBeforeRenewal":{ + "shape":"ProactiveRefreshTokenRenewalDaysBeforeRenewalInteger", + "documentation":"

        The days before token expiration when the system should attempt to renew the token, specified in days.

        " + } + }, + "documentation":"

        Configuration settings for proactively refreshing OAuth tokens before they expire.

        " + }, + "ProactiveRefreshTokenRenewalDaysBeforeRenewalInteger":{ + "type":"integer", + "box":true, + "min":30 + }, "PropertyName":{ "type":"string", "max":128, @@ -4220,6 +6276,45 @@ } }, "QueuedAt":{"type":"timestamp"}, + "RegisterAccountAssociationRequest":{ + "type":"structure", + "required":[ + "ManagedThingId", + "AccountAssociationId", + "DeviceDiscoveryId" + ], + "members":{ + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing to register with the account association.

        " + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association to register with the managed thing.

        " + }, + "DeviceDiscoveryId":{ + "shape":"DeviceDiscoveryId", + "documentation":"

        The identifier of the device discovery job associated with this registration.

        " + } + } + }, + "RegisterAccountAssociationResponse":{ + "type":"structure", + "members":{ + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association that was registered.

        " + }, + "DeviceDiscoveryId":{ + "shape":"DeviceDiscoveryId", + "documentation":"

        The identifier of the device discovery job associated with this registration.

        " + }, + "ManagedThingId":{ + "shape":"ManagedThingId", + "documentation":"

        The identifier of the managed thing that was registered with the account association.

        " + } + } + }, "RegisterCustomEndpointRequest":{ "type":"structure", "members":{ @@ -4250,7 +6345,15 @@ "ResourceNotFoundException":{ "type":"structure", "members":{ - "Message":{"shape":"ErrorMessage"} + "Message":{"shape":"ErrorMessage"}, + "ResourceId":{ + "shape":"ErrorResourceId", + "documentation":"

        Id of the affected resource.

        " + }, + "ResourceType":{ + "shape":"ErrorResourceType", + "documentation":"

        Type of the affected resource.

        " + } }, "documentation":"

        The specified resource does not exist.

        ", "error":{ @@ -4383,14 +6486,14 @@ "SchemaId":{ "type":"string", "max":128, - "min":1, - "pattern":"[a-zA-Z0-9./]+" + "min":3, + "pattern":"[a-zA-Z0-9.]+" }, "SchemaVersionDescription":{ "type":"string", - "max":256, - "min":1, - "pattern":"[a-zA-Z0-9., ]+" + "max":2048, + "min":10, + "pattern":"[a-zA-Z0-9.,/ -]+" }, "SchemaVersionFormat":{ "type":"string", @@ -4436,9 +6539,9 @@ }, "SchemaVersionNamespaceName":{ "type":"string", - "max":256, - "min":1, - "pattern":"[a-z]+" + "max":12, + "min":3, + "pattern":"[a-z0-9]+" }, "SchemaVersionSchema":{ "type":"structure", @@ -4455,9 +6558,9 @@ }, "SchemaVersionVersion":{ "type":"string", - "max":256, - "min":1, - "pattern":"(\\d+\\.\\d+|\\$latest)" + "max":12, + "min":3, + "pattern":"(\\d+\\.\\d+(\\.\\d+)?|\\$latest)" }, "SchemaVersionVisibility":{ "type":"string", @@ -4469,8 +6572,103 @@ "SchemaVersionedId":{ "type":"string", "max":128, - "min":1, - "pattern":"[a-zA-Z0-9.\\/]+(@(\\d+\\.\\d+|\\$latest))?" + "min":7, + "pattern":"[a-zA-Z0-9.]+@(\\d+\\.\\d+(\\.\\d+)?|\\$latest)" + }, + "SecretsManager":{ + "type":"structure", + "required":[ + "arn", + "versionId" + ], + "members":{ + "arn":{ + "shape":"SecretsManagerArn", + "documentation":"

        The Amazon Resource Name (ARN) of the AWS Secrets Manager secret.

        " + }, + "versionId":{ + "shape":"SecretsManagerVersionId", + "documentation":"

        The version ID of the AWS Secrets Manager secret.

        " + } + }, + "documentation":"

        Configuration for AWS Secrets Manager, used to securely store and manage sensitive information for connector destinations.

        " + }, + "SecretsManagerArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws:secretsmanager:[0-9a-zA-Z-]{1,32}:\\d{12}:secret:[A-Za-z0-9/_+=.@-]{8,520}" + }, + "SecretsManagerVersionId":{ + "type":"string", + "max":64, + "min":32, + "pattern":"[a-zA-Z0-9-_]+" + }, + "SendConnectorEventRequest":{ + "type":"structure", + "required":[ + "ConnectorId", + "Operation" + ], + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"

        The id of the connector between the third-party cloud provider and IoT managed integrations.

        ", + "location":"uri", + "locationName":"ConnectorId" + }, + "UserId":{ + "shape":"ThirdPartyUserId", + "documentation":"

        The id of the third-party cloud provider.

        " + }, + "Operation":{ + "shape":"ConnectorEventOperation", + "documentation":"

        The Open Connectivity Foundation (OCF) operation requested to be performed on the managed thing.

        The field op can have a value of \"I\" or \"U\". The field \"cn\" will contain the capability types.

        " + }, + "OperationVersion":{ + "shape":"ConnectorEventOperationVersion", + "documentation":"

        The Open Connectivity Foundation (OCF) security specification version for the operation being requested on the managed thing. For more information, see OCF Security Specification.

        " + }, + "StatusCode":{ + "shape":"ConnectorEventStatusCode", + "documentation":"

        The status code of the Open Connectivity Foundation (OCF) operation being performed on the managed thing.

        " + }, + "Message":{ + "shape":"ConnectorEventMessage", + "documentation":"

        The device state change event payload.

        This parameter will include the following three fields:

        • uri: schema auc://<PARTNER-DEVICE-ID>/ResourcePath (The Resourcepath corresponds to an OCF resource.)

        • op: For device state changes, this field must populate as n+d.

        • cn: The content depends on the OCF resource referenced in ResourcePath.

        " + }, + "DeviceDiscoveryId":{ + "shape":"DeviceDiscoveryId", + "documentation":"

        The id for the device discovery job.

        " + }, + "ConnectorDeviceId":{ + "shape":"ConnectorDeviceId", + "documentation":"

        The third-party device id as defined by the connector. This device id must not contain personal identifiable information (PII).

        This parameter is used for cloud-to-cloud devices only.

        " + }, + "TraceId":{ + "shape":"TraceId", + "documentation":"

        The trace request identifier used to correlate a command request and response. This is specified by the device owner, but will be generated by IoT managed integrations if not provided by the device owner.

        " + }, + "Devices":{ + "shape":"Devices", + "documentation":"

        The list of devices.

        " + }, + "MatterEndpoint":{ + "shape":"MatterEndpoint", + "documentation":"

        The device endpoint.

        " + } + } + }, + "SendConnectorEventResponse":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorId":{ + "shape":"ConnectorId", + "documentation":"

        The id of the connector between the third-party cloud provider and IoT managed integrations.

        " + } + } }, "SendManagedThingCommandRequest":{ "type":"structure", @@ -4491,7 +6689,14 @@ }, "ConnectorAssociationId":{ "shape":"ConnectorAssociationId", - "documentation":"

        The ID tracking the current discovery process for one connector association.

        " + "documentation":"

        The ID tracking the current discovery process for one connector association.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorAssociationId has been deprecated", + "deprecatedSince":"06-25-2025" + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the account association to use when sending a command to a managed thing.

        " } } }, @@ -4542,7 +6747,35 @@ }, "SmartHomeResourceType":{ "type":"string", - "pattern":"[*]$|^(managed-thing|credential-locker|provisioning-profile|ota-task)" + "pattern":"[*]$|^(managed-thing|credential-locker|provisioning-profile|ota-task|account-association)" + }, + "SpecVersion":{ + "type":"string", + "max":64, + "min":1, + "pattern":"\\d+\\.\\d+" + }, + "StartAccountAssociationRefreshRequest":{ + "type":"structure", + "required":["AccountAssociationId"], + "members":{ + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association to refresh.

        ", + "location":"uri", + "locationName":"AccountAssociationId" + } + } + }, + "StartAccountAssociationRefreshResponse":{ + "type":"structure", + "required":["OAuthAuthorizationUrl"], + "members":{ + "OAuthAuthorizationUrl":{ + "shape":"OAuthAuthorizationUrl", + "documentation":"

        Third-party IoT platform OAuth authorization server URL with all required parameters to perform end-user authentication during the refresh process.

        " + } + } }, "StartDeviceDiscoveryRequest":{ "type":"structure", @@ -4550,7 +6783,11 @@ "members":{ "DiscoveryType":{ "shape":"DiscoveryType", - "documentation":"

        The discovery type supporting the type of device to be discovered in the device discovery job request.

        " + "documentation":"

        The discovery type supporting the type of device to be discovered in the device discovery task request.

        " + }, + "CustomProtocolDetail":{ + "shape":"CustomProtocolDetail", + "documentation":"

        Additional protocol-specific details required for device discovery, which vary based on the discovery type.

        For a DiscoveryType of CUSTOM, the string-to-string map must have a key value of Name set to a non-empty-string.

        " }, "ControllerIdentifier":{ "shape":"ManagedThingId", @@ -4558,7 +6795,14 @@ }, "ConnectorAssociationIdentifier":{ "shape":"ConnectorAssociationId", - "documentation":"

        The id of the connector association.

        " + "documentation":"

        The id of the connector association.

        ", + "deprecated":true, + "deprecatedMessage":"ConnectorAssociationIdentifier is deprecated", + "deprecatedSince":"06-25-2025" + }, + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The identifier of the cloud-to-cloud account association to use for discovery of third-party devices.

        " }, "AuthenticationMaterial":{ "shape":"DiscoveryAuthMaterialString", @@ -4574,7 +6818,10 @@ }, "Tags":{ "shape":"TagsMap", - "documentation":"

        A set of key/value pairs that are used to manage the device discovery request.

        " + "documentation":"

        A set of key/value pairs that are used to manage the device discovery request.

        ", + "deprecated":true, + "deprecatedMessage":"Tags have been deprecated from this api", + "deprecatedSince":"06-25-2025" } } }, @@ -4656,6 +6903,36 @@ "max":128, "min":1 }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"IoTManagedIntegrationsResourceARN", + "documentation":"

        The ARN of the resource to which to add tags.

        ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

        A set of key/value pairs that are used to manage the resource.

        " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "TagValue":{ "type":"string", "max":256, @@ -4715,6 +6992,13 @@ }, "documentation":"

        Details about the over-the-air (OTA) task process.

        " }, + "ThirdPartyUserId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_.,@-]+", + "sensitive":true + }, "ThresholdPercentage":{ "type":"double", "box":true, @@ -4732,6 +7016,19 @@ }, "exception":true }, + "TokenEndpointAuthenticationScheme":{ + "type":"string", + "enum":[ + "HTTP_BASIC", + "REQUEST_BODY_CREDENTIALS" + ] + }, + "TokenUrl":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}([-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)" + }, "TraceId":{ "type":"string", "max":128, @@ -4757,6 +7054,104 @@ "pattern":"[0-9]+", "sensitive":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"IoTManagedIntegrationsResourceARN", + "documentation":"

        The ARN of the resource to which to add tags.

        ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

        A list of tag keys to remove from the resource.

        ", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateAccountAssociationRequest":{ + "type":"structure", + "required":["AccountAssociationId"], + "members":{ + "AccountAssociationId":{ + "shape":"AccountAssociationId", + "documentation":"

        The unique identifier of the account association to update.

        ", + "location":"uri", + "locationName":"AccountAssociationId" + }, + "Name":{ + "shape":"AccountAssociationName", + "documentation":"

        The new name to assign to the account association.

        " + }, + "Description":{ + "shape":"AccountAssociationDescription", + "documentation":"

        The new description to assign to the account association.

        " + } + } + }, + "UpdateCloudConnectorRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"CloudConnectorId", + "documentation":"

        The unique identifier of the cloud connector to update.

        ", + "location":"uri", + "locationName":"Identifier" + }, + "Name":{ + "shape":"DisplayName", + "documentation":"

        The new display name to assign to the cloud connector.

        " + }, + "Description":{ + "shape":"CloudConnectorDescription", + "documentation":"

        The new description to assign to the cloud connector.

        " + } + } + }, + "UpdateConnectorDestinationRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"ConnectorDestinationId", + "documentation":"

        The unique identifier of the connector destination to update.

        ", + "location":"uri", + "locationName":"Identifier" + }, + "Description":{ + "shape":"ConnectorDestinationDescription", + "documentation":"

        The new description to assign to the connector destination.

        " + }, + "Name":{ + "shape":"ConnectorDestinationName", + "documentation":"

        The new display name to assign to the connector destination.

        " + }, + "AuthType":{ + "shape":"AuthType", + "documentation":"

        The new authentication type to use for the connector destination.

        " + }, + "AuthConfig":{ + "shape":"AuthConfigUpdate", + "documentation":"

        The updated authentication configuration details for the connector destination.

        " + }, + "SecretsManager":{ + "shape":"SecretsManager", + "documentation":"

        The updated AWS Secrets Manager configuration for the connector destination.

        " + } + } + }, "UpdateDestinationRequest":{ "type":"structure", "required":["Name"], @@ -4842,6 +7237,10 @@ "shape":"CapabilityReport", "documentation":"

        A report of the capabilities for the managed thing.

        " }, + "CapabilitySchemas":{ + "shape":"CapabilitySchemas", + "documentation":"

        The updated capability schemas that define the functionality and features supported by the managed thing.

        " + }, "Capabilities":{ "shape":"Capabilities", "documentation":"

        The capabilities of the device such as light bulb.

        " diff --git a/services/iotsecuretunneling/pom.xml b/services/iotsecuretunneling/pom.xml index ef5c36a1d72e..2e20a63599b2 100644 --- a/services/iotsecuretunneling/pom.xml +++ b/services/iotsecuretunneling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotsecuretunneling AWS Java SDK :: Services :: IoTSecureTunneling diff --git a/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config b/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config +++ b/services/iotsecuretunneling/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotsitewise/pom.xml b/services/iotsitewise/pom.xml index 0fba6f843390..49318d8c4604 100644 --- a/services/iotsitewise/pom.xml +++ b/services/iotsitewise/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotsitewise AWS Java SDK :: Services :: Io T Site Wise diff --git a/services/iotsitewise/src/main/resources/codegen-resources/customization.config b/services/iotsitewise/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/iotsitewise/src/main/resources/codegen-resources/customization.config +++ b/services/iotsitewise/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotthingsgraph/pom.xml b/services/iotthingsgraph/pom.xml index 0d24b2c85ac5..9ceb3152c521 100644 --- a/services/iotthingsgraph/pom.xml +++ 
b/services/iotthingsgraph/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotthingsgraph AWS Java SDK :: Services :: IoTThingsGraph diff --git a/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config b/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config +++ b/services/iotthingsgraph/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iottwinmaker/pom.xml b/services/iottwinmaker/pom.xml index e0e305420cb0..c032db9a50ab 100644 --- a/services/iottwinmaker/pom.xml +++ b/services/iottwinmaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iottwinmaker AWS Java SDK :: Services :: Io T Twin Maker diff --git a/services/iottwinmaker/src/main/resources/codegen-resources/customization.config b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/iottwinmaker/src/main/resources/codegen-resources/customization.config +++ b/services/iottwinmaker/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/iotwireless/pom.xml b/services/iotwireless/pom.xml index 692261b56fa5..ef8ae6ad01b6 100644 --- a/services/iotwireless/pom.xml +++ b/services/iotwireless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT iotwireless AWS Java SDK :: Services :: IoT Wireless diff --git 
a/services/iotwireless/src/main/resources/codegen-resources/customization.config b/services/iotwireless/src/main/resources/codegen-resources/customization.config index 6c9a820fdb8b..8c7263446a5a 100644 --- a/services/iotwireless/src/main/resources/codegen-resources/customization.config +++ b/services/iotwireless/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "underscoresInNameBehavior": "ALLOW", - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ivs/pom.xml b/services/ivs/pom.xml index 04c775049133..81e928b2f0b9 100644 --- a/services/ivs/pom.xml +++ b/services/ivs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ivs AWS Java SDK :: Services :: Ivs diff --git a/services/ivs/src/main/resources/codegen-resources/customization.config b/services/ivs/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ivs/src/main/resources/codegen-resources/customization.config +++ b/services/ivs/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ivschat/pom.xml b/services/ivschat/pom.xml index d92c69086c37..7048ec5fb165 100644 --- a/services/ivschat/pom.xml +++ b/services/ivschat/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ivschat AWS Java SDK :: Services :: Ivschat diff --git a/services/ivschat/src/main/resources/codegen-resources/customization.config b/services/ivschat/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ivschat/src/main/resources/codegen-resources/customization.config +++ b/services/ivschat/src/main/resources/codegen-resources/customization.config @@ 
-1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ivsrealtime/pom.xml b/services/ivsrealtime/pom.xml index 974a68a6d9df..cab263fd39ad 100644 --- a/services/ivsrealtime/pom.xml +++ b/services/ivsrealtime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ivsrealtime AWS Java SDK :: Services :: IVS Real Time diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/customization.config b/services/ivsrealtime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/customization.config +++ b/services/ivsrealtime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json b/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json index 309300d0e943..2b5765be5c1f 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json +++ b/services/ivsrealtime/src/main/resources/codegen-resources/paginators-1.json @@ -21,6 +21,12 @@ "output_token": "nextToken", "limit_key": "maxResults" }, + "ListParticipantReplicas": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "replicas" + }, "ListParticipants": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json index 19089d49a743..2a82fdd4ada3 100644 --- a/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json +++ 
b/services/ivsrealtime/src/main/resources/codegen-resources/service-2.json @@ -436,6 +436,21 @@ ], "documentation":"

        Lists events for a specified participant that occurred during a specified stage session.

        " }, + "ListParticipantReplicas":{ + "name":"ListParticipantReplicas", + "http":{ + "method":"POST", + "requestUri":"/ListParticipantReplicas", + "responseCode":200 + }, + "input":{"shape":"ListParticipantReplicasRequest"}, + "output":{"shape":"ListParticipantReplicasResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Lists all the replicas for a participant from a source stage.

        " + }, "ListParticipants":{ "name":"ListParticipants", "http":{ @@ -551,6 +566,26 @@ ], "documentation":"

        Starts a Composition from a stage based on the configuration provided in the request.

        A Composition is an ephemeral resource that exists after this operation returns successfully. Composition stops and the resource is deleted:

        • When StopComposition is called.

        • After a 1-minute timeout, when all participants are disconnected from the stage.

        • After a 1-minute timeout, if there are no participants in the stage when StartComposition is called.

        • When broadcasting to the IVS channel fails and all retries are exhausted.

        • When broadcasting is disconnected and all attempts to reconnect are exhausted.

        " }, + "StartParticipantReplication":{ + "name":"StartParticipantReplication", + "http":{ + "method":"POST", + "requestUri":"/StartParticipantReplication", + "responseCode":200 + }, + "input":{"shape":"StartParticipantReplicationRequest"}, + "output":{"shape":"StartParticipantReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

        Starts replicating a publishing participant from a source stage to a destination stage.

        " + }, "StopComposition":{ "name":"StopComposition", "http":{ @@ -570,6 +605,23 @@ ], "documentation":"

        Stops and deletes a Composition resource. Any broadcast from the Composition resource is stopped.

        " }, + "StopParticipantReplication":{ + "name":"StopParticipantReplication", + "http":{ + "method":"POST", + "requestUri":"/StopParticipantReplication", + "responseCode":200 + }, + "input":{"shape":"StopParticipantReplicationRequest"}, + "output":{"shape":"StopParticipantReplicationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Stops a replicated participant session.

        " + }, "TagResource":{ "name":"TagResource", "http":{ @@ -734,6 +786,10 @@ "hlsConfiguration":{ "shape":"ParticipantRecordingHlsConfiguration", "documentation":"

        HLS configuration object for individual participant recording.

        " + }, + "recordParticipantReplicas":{ + "shape":"RecordParticipantReplicas", + "documentation":"

        Optional field to disable replica participant recording. If this is set to false when a participant is a replica, replica participants are not recorded. Default: true.

        " } }, "documentation":"

        Object specifying a configuration for individual participant recording.

        " @@ -834,7 +890,7 @@ "members":{ "targetSegmentDurationSeconds":{ "shape":"CompositionRecordingTargetSegmentDurationSeconds", - "documentation":"

        Defines the target duration for recorded segments generated when using composite recording. Segments may have durations shorter than the specified value when needed to ensure each segment begins with a keyframe. Default: 2.

        " + "documentation":"

        Defines the target duration for recorded segments generated when using composite recording. Default: 2.

        " } }, "documentation":"

        An object representing a configuration of HLS recordings for server-side composition.

        " @@ -1467,6 +1523,18 @@ "errorCode":{ "shape":"EventErrorCode", "documentation":"

        If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null.

        • B_FRAME_PRESENT — The participant's stream includes B-frames. For details, see IVS RTMP Publishing.

        • BITRATE_EXCEEDED — The participant exceeded the maximum supported bitrate. For details, see Service Quotas.

        • INSUFFICIENT_CAPABILITIES — The participant tried to take an action that the participant’s token is not allowed to do. For details on participant capabilities, see the capabilities field in CreateParticipantToken.

        • INTERNAL_SERVER_EXCEPTION — The participant failed to publish to the stage due to an internal server error.

        • INVALID_AUDIO_CODEC — The participant is using an invalid audio codec. For details, see Stream Ingest.

        • INVALID_INPUT — The participant is using an invalid input stream.

        • INVALID_PROTOCOL — The participant's IngestConfiguration resource is configured for RTMPS but they tried streaming with RTMP. For details, see IVS RTMP Publishing.

        • INVALID_STREAM_KEY — The participant is using an invalid stream key. For details, see IVS RTMP Publishing.

        • INVALID_VIDEO_CODEC — The participant is using an invalid video codec. For details, see Stream Ingest.

        • PUBLISHER_NOT_FOUND — The participant tried to subscribe to a publisher that doesn’t exist.

        • QUOTA_EXCEEDED — The number of participants who want to publish/subscribe to a stage exceeds the quota. For details, see Service Quotas.

        • RESOLUTION_EXCEEDED — The participant exceeded the maximum supported resolution. For details, see Service Quotas.

        • REUSE_OF_STREAM_KEY — The participant tried to use a stream key that is associated with another active stage session.

        • STREAM_DURATION_EXCEEDED — The participant exceeded the maximum allowed stream duration. For details, see Service Quotas.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is replicated. Applicable only if the event name is REPLICATION_STARTED or REPLICATION_STOPPED.

        " + }, + "destinationSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the destination stage. Applicable only if the event name is REPLICATION_STARTED or REPLICATION_STOPPED.

        " + }, + "replica":{ + "shape":"Replica", + "documentation":"

        If true, this indicates the participantId is a replicated participant. If this is a subscribe event, then this flag refers to remoteParticipantId.

        " } }, "documentation":"

        An occurrence during a stage session.

        " @@ -1505,7 +1573,9 @@ "SUBSCRIBE_STOPPED", "PUBLISH_ERROR", "SUBSCRIBE_ERROR", - "JOIN_ERROR" + "JOIN_ERROR", + "REPLICATION_STARTED", + "REPLICATION_STOPPED" ] }, "Framerate":{ @@ -2096,6 +2166,45 @@ } } }, + "ListParticipantReplicasRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that has been replicated. This is assigned by IVS and returned by CreateParticipantToken or the jti (JWT ID) used to create a self signed token.

        " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

        The first participant to retrieve. This is used for pagination; see the nextToken response field.

        " + }, + "maxResults":{ + "shape":"MaxParticipantReplicaResults", + "documentation":"

        Maximum number of results to return. Default: 50.

        " + } + } + }, + "ListParticipantReplicasResponse":{ + "type":"structure", + "required":["replicas"], + "members":{ + "replicas":{ + "shape":"ParticipantReplicaList", + "documentation":"

        List of all participant replicas.

        " + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

        If there are more participants than maxResults, use nextToken in the request to get the next set.

        " + } + } + }, "ListParticipantsRequest":{ "type":"structure", "required":[ @@ -2310,6 +2419,12 @@ "max":100, "min":1 }, + "MaxParticipantReplicaResults":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "MaxParticipantResults":{ "type":"integer", "box":true, @@ -2404,7 +2519,7 @@ }, "recordingS3Prefix":{ "shape":"ParticipantRecordingS3Prefix", - "documentation":"

        S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled.

        " + "documentation":"

        S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string), if recording is not enabled. If individual participant recording merge is enabled, and if a stage publisher disconnects from a stage and then reconnects, IVS tries to record to the same S3 prefix as the previous session. See Merge Fragmented Individual Participant Recordings.

        " }, "recordingState":{ "shape":"ParticipantRecordingState", @@ -2413,6 +2528,22 @@ "protocol":{ "shape":"ParticipantProtocol", "documentation":"

        Type of ingest protocol that the participant employs for broadcasting.

        " + }, + "replicationType":{ + "shape":"ReplicationType", + "documentation":"

        Indicates if the participant has been replicated to another stage or is a replica from another stage. Default: NONE.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        The participant's replication state.

        " + }, + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        Source stage ARN from which this participant is replicated, if replicationType is REPLICA.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage, if replicationType is REPLICA.

        " } }, "documentation":"

        Object describing a participant that has joined a stage.

        " @@ -2515,6 +2646,48 @@ "max":10, "min":2 }, + "ParticipantReplica":{ + "type":"structure", + "required":[ + "sourceStageArn", + "participantId", + "sourceSessionId", + "destinationStageArn", + "destinationSessionId", + "replicationState" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage from which this participant is replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that has been replicated. This is assigned by IVS and returned by CreateParticipantToken or the jti (JWT ID) used to create a self signed token.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is replicated.

        " + }, + "destinationSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the destination stage.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        Replica’s current replication state.

        " + } + }, + "documentation":"

        Information about the replicated destination stage for a participant.

        " + }, + "ParticipantReplicaList":{ + "type":"list", + "member":{"shape":"ParticipantReplica"} + }, "ParticipantState":{ "type":"string", "enum":[ @@ -2548,6 +2721,22 @@ "recordingState":{ "shape":"ParticipantRecordingState", "documentation":"

        The participant’s recording state.

        " + }, + "replicationType":{ + "shape":"ReplicationType", + "documentation":"

        Indicates if the participant has been replicated to another stage or is a replica from another stage. Default: NONE.

        " + }, + "replicationState":{ + "shape":"ReplicationState", + "documentation":"

        The participant's replication state.

        " + }, + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage from which this participant is replicated.

        " + }, + "sourceSessionId":{ + "shape":"StageSessionId", + "documentation":"

        ID of the session within the source stage, if replicationType is REPLICA.

        " } }, "documentation":"

        Summary object describing a participant that has joined a stage.

        " @@ -2660,7 +2849,12 @@ "type":"timestamp", "timestampFormat":"iso8601" }, - "ParticipantTokenId":{"type":"string"}, + "ParticipantTokenId":{ + "type":"string", + "max":64, + "min":0, + "pattern":"[a-zA-Z0-9-_]*" + }, "ParticipantTokenList":{ "type":"list", "member":{"shape":"ParticipantToken"} @@ -2879,6 +3073,13 @@ "documentation":"

        Summary information about a public key.

        " }, "Published":{"type":"boolean"}, + "ReconnectWindowSeconds":{ + "type":"integer", + "box":true, + "max":60, + "min":0 + }, + "RecordParticipantReplicas":{"type":"boolean"}, "RecordingConfiguration":{ "type":"structure", "members":{ @@ -2897,6 +3098,22 @@ "type":"string", "enum":["HLS"] }, + "Replica":{"type":"boolean"}, + "ReplicationState":{ + "type":"string", + "enum":[ + "ACTIVE", + "STOPPED" + ] + }, + "ReplicationType":{ + "type":"string", + "enum":[ + "SOURCE", + "REPLICA", + "NONE" + ] + }, "ResourceArn":{ "type":"string", "max":128, @@ -3265,6 +3482,83 @@ } } }, + "StartParticipantReplicationRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "destinationStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage to which the participant will be replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that will be replicated. This is assigned by IVS and returned by CreateParticipantToken or the jti (JWT ID) used to create a self signed token.

        " + }, + "reconnectWindowSeconds":{ + "shape":"ReconnectWindowSeconds", + "documentation":"

        If the participant disconnects and then reconnects within the specified interval, replication will continue to be ACTIVE. Default: 0.

        " + }, + "attributes":{ + "shape":"ParticipantAttributes", + "documentation":"

        Application-provided attributes to set on the replicated participant in the destination stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

        These attributes are merged with any attributes set for this participant when creating the token. If there is overlap in keys, the values in these attributes are replaced.

        " + } + } + }, + "StartParticipantReplicationResponse":{ + "type":"structure", + "members":{ + "accessControlAllowOrigin":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Allow-Origin" + }, + "accessControlExposeHeaders":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Expose-Headers" + }, + "cacheControl":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Cache-Control" + }, + "contentSecurityPolicy":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Content-Security-Policy" + }, + "strictTransportSecurity":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Strict-Transport-Security" + }, + "xContentTypeOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Content-Type-Options" + }, + "xFrameOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Frame-Options" + } + } + }, "StopCompositionRequest":{ "type":"structure", "required":["arn"], @@ -3280,6 +3574,75 @@ "members":{ } }, + "StopParticipantReplicationRequest":{ + "type":"structure", + "required":[ + "sourceStageArn", + "destinationStageArn", + "participantId" + ], + "members":{ + "sourceStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant is publishing.

        " + }, + "destinationStageArn":{ + "shape":"StageArn", + "documentation":"

        ARN of the stage where the participant has been replicated.

        " + }, + "participantId":{ + "shape":"ParticipantId", + "documentation":"

        Participant ID of the publisher that has been replicated. This is assigned by IVS and returned by CreateParticipantToken or the jti (JWT ID) used to create a self signed token.

        " + } + } + }, + "StopParticipantReplicationResponse":{ + "type":"structure", + "members":{ + "accessControlAllowOrigin":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Allow-Origin" + }, + "accessControlExposeHeaders":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Access-Control-Expose-Headers" + }, + "cacheControl":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Cache-Control" + }, + "contentSecurityPolicy":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Content-Security-Policy" + }, + "strictTransportSecurity":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"Strict-Transport-Security" + }, + "xContentTypeOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Content-Type-Options" + }, + "xFrameOptions":{ + "shape":"String", + "documentation":"

        ", + "location":"header", + "locationName":"X-Frame-Options" + } + } + }, "StorageConfiguration":{ "type":"structure", "required":["arn"], @@ -3616,5 +3979,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"

        The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

        Key Concepts

        • Stage — A virtual space where participants can exchange video in real time.

        • Participant token — A token that authenticates a participant when they join a stage.

        • Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

        For server-side composition:

        • Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process.

        • Composition — Controls the look of the outputs, including how participants are positioned in the video.

        For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

        Tagging

        A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

        Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

        The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

        At most 50 tags can be applied to a resource.

        " + "documentation":"

        The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

        Key Concepts

        • Stage — A virtual space where participants can exchange video in real time.

        • Participant token — A token that authenticates a participant when they join a stage.

        • Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants.

        For server-side composition:

        • Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition operations support this process.

        • Composition — Controls the look of the outputs, including how participants are positioned in the video.

        For participant replication:

        • Source stage — The stage where the participant originally joined, which is used as the source for replication.

        • Destination stage — The stage to which the participant is replicated.

        • Replicated participant — A participant in a stage that is replicated to one or more destination stages.

        • Replica participant — A participant in a destination stage that is replicated from another stage (the source stage).

        For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

        Tagging

        A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Best practices and strategies in Tagging AWS Resources and Tag Editor for details, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific constraints beyond what is documented there.

        Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

        The Amazon IVS real-time API has these tag-related operations: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage.

        At most 50 tags can be applied to a resource.

        " } diff --git a/services/kafka/pom.xml b/services/kafka/pom.xml index 9451f3897767..b7a258f7464a 100644 --- a/services/kafka/pom.xml +++ b/services/kafka/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kafka AWS Java SDK :: Services :: Kafka diff --git a/services/kafka/src/main/resources/codegen-resources/customization.config b/services/kafka/src/main/resources/codegen-resources/customization.config index 2e7c25143ce8..9ebe291adc93 100644 --- a/services/kafka/src/main/resources/codegen-resources/customization.config +++ b/services/kafka/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listClusters" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kafkaconnect/pom.xml b/services/kafkaconnect/pom.xml index 26e7dcc595f0..88a4f1dd4710 100644 --- a/services/kafkaconnect/pom.xml +++ b/services/kafkaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kafkaconnect AWS Java SDK :: Services :: Kafka Connect diff --git a/services/kafkaconnect/src/main/resources/codegen-resources/customization.config b/services/kafkaconnect/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/kafkaconnect/src/main/resources/codegen-resources/customization.config +++ b/services/kafkaconnect/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kendra/pom.xml b/services/kendra/pom.xml index ea0b31148eba..77749c163c32 100644 --- a/services/kendra/pom.xml +++ b/services/kendra/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kendra AWS Java SDK :: Services :: 
Kendra diff --git a/services/kendra/src/main/resources/codegen-resources/customization.config b/services/kendra/src/main/resources/codegen-resources/customization.config index 856e186a78e8..d5dea02b2d47 100644 --- a/services/kendra/src/main/resources/codegen-resources/customization.config +++ b/services/kendra/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kendraranking/pom.xml b/services/kendraranking/pom.xml index ae59d2fdf2e2..504019bda670 100644 --- a/services/kendraranking/pom.xml +++ b/services/kendraranking/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kendraranking AWS Java SDK :: Services :: Kendra Ranking diff --git a/services/kendraranking/src/main/resources/codegen-resources/customization.config b/services/kendraranking/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/kendraranking/src/main/resources/codegen-resources/customization.config +++ b/services/kendraranking/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/keyspaces/pom.xml b/services/keyspaces/pom.xml index 529ba2ba2f84..24bcb41d6600 100644 --- a/services/keyspaces/pom.xml +++ b/services/keyspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT keyspaces AWS Java SDK :: Services :: Keyspaces diff --git a/services/keyspaces/src/main/resources/codegen-resources/customization.config b/services/keyspaces/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/keyspaces/src/main/resources/codegen-resources/customization.config +++ b/services/keyspaces/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/keyspaces/src/main/resources/codegen-resources/service-2.json b/services/keyspaces/src/main/resources/codegen-resources/service-2.json index 28e092372399..7c954a0bf296 100644 --- a/services/keyspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/keyspaces/src/main/resources/codegen-resources/service-2.json @@ -461,6 +461,60 @@ "box":true, "min":1 }, + "CdcPropagateTags":{ + "type":"string", + "enum":[ + "TABLE", + "NONE" + ] + }, + "CdcSpecification":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"CdcStatus", + "documentation":"

        The status of the CDC stream. You can enable or disable a stream for a table.

        " + }, + "viewType":{ + "shape":"ViewType", + "documentation":"

        The view type specifies the changes Amazon Keyspaces records for each changed row in the stream. After you create the stream, you can't make changes to this selection.

        The options are:

        • NEW_AND_OLD_IMAGES - both versions of the row, before and after the change. This is the default.

        • NEW_IMAGE - the version of the row after the change.

        • OLD_IMAGE - the version of the row before the change.

        • KEYS_ONLY - the partition and clustering keys of the row that was changed.

        " + }, + "tags":{ + "shape":"TagList", + "documentation":"

        The tags (key-value pairs) that you want to apply to the stream.

        " + }, + "propagateTags":{ + "shape":"CdcPropagateTags", + "documentation":"

        Specifies that the stream inherits the tags from the table.

        " + } + }, + "documentation":"

        The settings for the CDC stream of a table. For more information about CDC streams, see Working with change data capture (CDC) streams in Amazon Keyspaces in the Amazon Keyspaces Developer Guide.

        " + }, + "CdcSpecificationSummary":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"CdcStatus", + "documentation":"

        The status of the CDC stream. Specifies if the table has a CDC stream.

        " + }, + "viewType":{ + "shape":"ViewType", + "documentation":"

        The view type specifies the changes Amazon Keyspaces records for each changed row in the stream. This setting can't be changed, after the stream has been created.

        The options are:

        • NEW_AND_OLD_IMAGES - both versions of the row, before and after the change. This is the default.

        • NEW_IMAGE - the version of the row after the change.

        • OLD_IMAGE - the version of the row before the change.

        • KEYS_ONLY - the partition and clustering keys of the row that was changed.

        " + } + }, + "documentation":"

        The settings of the CDC stream of the table. For more information about CDC streams, see Working with change data capture (CDC) streams in Amazon Keyspaces in the Amazon Keyspaces Developer Guide.

        " + }, + "CdcStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "ENABLING", + "DISABLED", + "DISABLING" + ] + }, "ClientSideTimestamps":{ "type":"structure", "required":["status"], @@ -630,6 +684,10 @@ "replicaSpecifications":{ "shape":"ReplicaSpecificationList", "documentation":"

        The optional Amazon Web Services Region specific settings of a multi-Region table. These settings overwrite the general settings of the table for the specified Region.

        For a multi-Region table in provisioned capacity mode, you can configure the table's read capacity differently for each Region's replica. The write capacity, however, remains synchronized between all replicas to ensure that there's enough capacity to replicate writes across all Regions. To define the read capacity for a table replica in a specific Region, you can do so by configuring the following parameters.

        • region: The Region where these settings are applied. (Required)

        • readCapacityUnits: The provisioned read capacity units. (Optional)

        • readCapacityAutoScaling: The read capacity auto scaling settings for the table. (Optional)

        " + }, + "cdcSpecification":{ + "shape":"CdcSpecification", + "documentation":"

        The CDC stream settings of the table.

        " } } }, @@ -979,6 +1037,14 @@ "replicaSpecifications":{ "shape":"ReplicaSpecificationSummaryList", "documentation":"

        Returns the Amazon Web Services Region specific settings of all Regions a multi-Region table is replicated in.

        " + }, + "latestStreamArn":{ + "shape":"StreamArn", + "documentation":"

        The Amazon Resource Name (ARN) of the stream.

        " + }, + "cdcSpecification":{ + "shape":"CdcSpecificationSummary", + "documentation":"

        The CDC stream settings of the table.

        " } } }, @@ -1534,6 +1600,12 @@ "type":"list", "member":{"shape":"StaticColumn"} }, + "StreamArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:(aws[a-zA-Z0-9-]*):cassandra:.+.*" + }, "String":{"type":"string"}, "TableName":{ "type":"string", @@ -1807,6 +1879,10 @@ "replicaSpecifications":{ "shape":"ReplicaSpecificationList", "documentation":"

        The Region specific settings of a multi-Regional table.

        " + }, + "cdcSpecification":{ + "shape":"CdcSpecification", + "documentation":"

        The CDC stream settings of the table.

        " } } }, @@ -1831,6 +1907,15 @@ "documentation":"

        The operation failed due to an invalid or malformed request.

        ", "exception":true }, + "ViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "KEYS_ONLY", + "NEW_AND_OLD_IMAGES" + ] + }, "kmsKeyARN":{ "type":"string", "max":5096, diff --git a/services/keyspacesstreams/pom.xml b/services/keyspacesstreams/pom.xml new file mode 100644 index 000000000000..9277d9d6f6fb --- /dev/null +++ b/services/keyspacesstreams/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.76-SNAPSHOT + + keyspacesstreams + AWS Java SDK :: Services :: Keyspaces Streams + The AWS Java SDK for Keyspaces Streams module holds the client classes that are used for + communicating with Keyspaces Streams. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.keyspacesstreams + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..287f83f9e80e --- /dev/null +++ b/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,137 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://cassandra-streams-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://cassandra-streams.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git 
a/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-tests.json b/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..03a21dc65e40 --- /dev/null +++ b/services/keyspacesstreams/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,201 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack 
enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + 
"params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://cassandra-streams.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/keyspacesstreams/src/main/resources/codegen-resources/paginators-1.json b/services/keyspacesstreams/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..570c7a7f4b5c --- /dev/null +++ b/services/keyspacesstreams/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,16 @@ +{ + "pagination": { + "GetStream": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "shards" + }, + "ListStreams": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "streams" + } + } +} diff --git a/services/keyspacesstreams/src/main/resources/codegen-resources/service-2.json b/services/keyspacesstreams/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..69db32381ef1 --- /dev/null +++ b/services/keyspacesstreams/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,755 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-09-09", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"cassandra-streams", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + 
"serviceFullName":"Amazon Keyspaces Streams", + "serviceId":"KeyspacesStreams", + "signatureVersion":"v4", + "signingName":"cassandra", + "targetPrefix":"KeyspacesStreams", + "uid":"keyspacesstreams-2024-09-09" + }, + "operations":{ + "GetRecords":{ + "name":"GetRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetRecordsInput"}, + "output":{"shape":"GetRecordsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Retrieves data records from a specified shard in an Amazon Keyspaces data stream. This operation returns a collection of data records from the shard, including the primary key columns and information about modifications made to the captured table data. Each record represents a single data modification in the Amazon Keyspaces table and includes metadata about when the change occurred.

        " + }, + "GetShardIterator":{ + "name":"GetShardIterator", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetShardIteratorInput"}, + "output":{"shape":"GetShardIteratorOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns a shard iterator that serves as a bookmark for reading data from a specific position in an Amazon Keyspaces data stream's shard. The shard iterator specifies the shard position from which to start reading data records sequentially. You can specify whether to begin reading at the latest record, the oldest record, or at a particular sequence number within the shard.

        " + }, + "GetStream":{ + "name":"GetStream", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetStreamInput"}, + "output":{"shape":"GetStreamOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns detailed information about a specific data capture stream for an Amazon Keyspaces table. The information includes the stream's Amazon Resource Name (ARN), creation time, current status, retention period, shard composition, and associated table details. This operation helps you monitor and manage the configuration of your Amazon Keyspaces data streams.

        " + }, + "ListStreams":{ + "name":"ListStreams", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListStreamsInput"}, + "output":{"shape":"ListStreamsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns a list of all data capture streams associated with your Amazon Keyspaces account or for a specific keyspace or table. The response includes information such as stream ARNs, table associations, creation timestamps, and current status. This operation helps you discover and manage all active data streams in your Amazon Keyspaces environment.

        " + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

        You don't have sufficient permissions to perform this action.

        " + } + }, + "documentation":"

        You don't have sufficient access permissions to perform this operation.

        This exception occurs when your IAM user or role lacks the required permissions to access the Amazon Keyspaces resource or perform the requested action. Check your IAM policies and ensure they grant the necessary permissions.

        ", + "exception":true + }, + "Blob":{"type":"blob"}, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Date":{"type":"timestamp"}, + "GetRecordsInput":{ + "type":"structure", + "required":["shardIterator"], + "members":{ + "shardIterator":{ + "shape":"ShardIterator", + "documentation":"

        The unique identifier of the shard iterator. A shard iterator specifies the position in the shard from which you want to start reading data records sequentially. You obtain this value by calling the GetShardIterator operation. Each shard iterator is valid for 5 minutes after creation.

        " + }, + "maxResults":{ + "shape":"GetRecordsInputMaxResultsInteger", + "documentation":"

        The maximum number of records to return in a single GetRecords request. Default value is 1000. You can specify a limit between 1 and 10000, but the actual number returned might be less than the specified maximum if the size of the data for the returned records exceeds the internal size limit.

        " + } + } + }, + "GetRecordsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "GetRecordsOutput":{ + "type":"structure", + "members":{ + "changeRecords":{ + "shape":"RecordList", + "documentation":"

        An array of change data records retrieved from the specified shard. Each record represents a single data modification (insert, update, or delete) to a row in the Amazon Keyspaces table. Records include the primary key columns and information about what data was modified.

        " + }, + "nextShardIterator":{ + "shape":"ShardIterator", + "documentation":"

        The next position in the shard from which to start sequentially reading data records. If null, the shard has been closed and the requested iterator doesn't return any more data.

        " + } + } + }, + "GetShardIteratorInput":{ + "type":"structure", + "required":[ + "streamArn", + "shardId", + "shardIteratorType" + ], + "members":{ + "streamArn":{ + "shape":"StreamArn", + "documentation":"

        The Amazon Resource Name (ARN) of the stream for which to get the shard iterator. The ARN uniquely identifies the stream within Amazon Keyspaces.

        " + }, + "shardId":{ + "shape":"ShardId", + "documentation":"

        The identifier of the shard within the stream. The shard ID uniquely identifies a subset of the stream's data records that you want to access.

        " + }, + "shardIteratorType":{ + "shape":"ShardIteratorType", + "documentation":"

        Determines how the shard iterator is positioned. Must be one of the following:

        • TRIM_HORIZON - Start reading at the last untrimmed record in the shard, which is the oldest data record in the shard.

        • AT_SEQUENCE_NUMBER - Start reading exactly from the specified sequence number.

        • AFTER_SEQUENCE_NUMBER - Start reading right after the specified sequence number.

        • LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data.

        " + }, + "sequenceNumber":{ + "shape":"SequenceNumber", + "documentation":"

        The sequence number of the data record in the shard from which to start reading. Required if ShardIteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. This parameter is ignored for other iterator types.

        " + } + } + }, + "GetShardIteratorOutput":{ + "type":"structure", + "members":{ + "shardIterator":{ + "shape":"ShardIterator", + "documentation":"

        The unique identifier for the shard iterator. This value is used in the GetRecords operation to retrieve data records from the specified shard. Each shard iterator expires 5 minutes after it is returned to the requester.

        " + } + } + }, + "GetStreamInput":{ + "type":"structure", + "required":["streamArn"], + "members":{ + "streamArn":{ + "shape":"StreamArn", + "documentation":"

        The Amazon Resource Name (ARN) of the stream for which detailed information is requested. This uniquely identifies the specific stream you want to get information about.

        " + }, + "maxResults":{ + "shape":"GetStreamInputMaxResultsInteger", + "documentation":"

        The maximum number of shard objects to return in a single GetStream request. Default value is 100. The minimum value is 1 and the maximum value is 1000.

        " + }, + "shardFilter":{ + "shape":"ShardFilter", + "documentation":"

        Optional filter criteria to apply when retrieving shards. You can filter shards based on their state or other attributes to narrow down the results returned by the GetStream operation.

        " + }, + "nextToken":{ + "shape":"ShardIdToken", + "documentation":"

        An optional pagination token provided by a previous GetStream operation. If this parameter is specified, the response includes only records beyond the token, up to the value specified by maxResults.

        " + } + } + }, + "GetStreamInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "GetStreamOutput":{ + "type":"structure", + "required":[ + "streamArn", + "streamLabel", + "streamStatus", + "streamViewType", + "creationRequestDateTime", + "keyspaceName", + "tableName" + ], + "members":{ + "streamArn":{ + "shape":"StreamArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the stream within Amazon Keyspaces. This ARN can be used in other API operations to reference this specific stream.

        " + }, + "streamLabel":{ + "shape":"String", + "documentation":"

        A timestamp that serves as a unique identifier for this stream, used for debugging and monitoring purposes. The stream label represents the point in time when the stream was created.

        " + }, + "streamStatus":{ + "shape":"StreamStatus", + "documentation":"

        The current status of the stream. Values can be ENABLING, ENABLED, DISABLING, or DISABLED. Operations on the stream depend on its current status.

        " + }, + "streamViewType":{ + "shape":"StreamViewType", + "documentation":"

        The format of the data records in this stream. Currently, this can be one of the following options:

        • NEW_AND_OLD_IMAGES - both versions of the row, before and after the change. This is the default.

        • NEW_IMAGE - the version of the row after the change.

        • OLD_IMAGE - the version of the row before the change.

        • KEYS_ONLY - the partition and clustering keys of the row that was changed.

        " + }, + "creationRequestDateTime":{ + "shape":"Date", + "documentation":"

        The date and time when the request to create this stream was issued. The value is represented in ISO 8601 format.

        " + }, + "keyspaceName":{ + "shape":"KeyspaceName", + "documentation":"

        The name of the keyspace containing the table associated with this stream. The keyspace name is part of the table's hierarchical identifier in Amazon Keyspaces.

        " + }, + "tableName":{ + "shape":"TableName", + "documentation":"

        The name of the table associated with this stream. The stream captures changes to rows in this Amazon Keyspaces table.

        " + }, + "shards":{ + "shape":"ShardDescriptionList", + "documentation":"

        An array of shard objects associated with this stream. Each shard contains a subset of the stream's data records and has its own unique identifier. The collection of shards represents the complete stream data.

        " + }, + "nextToken":{ + "shape":"ShardIdToken", + "documentation":"

        A pagination token that can be used in a subsequent GetStream request. This token is returned if the response contains more shards than can be returned in a single response.

        " + } + } + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

        The service encountered an internal error. Try your request again.

        " + } + }, + "documentation":"

        The Amazon Keyspaces service encountered an unexpected error while processing the request.

        This internal server error is not related to your request parameters. Retry your request after a brief delay. If the issue persists, contact Amazon Web Services Support with details of your request to help identify and resolve the problem.

        ", + "exception":true, + "fault":true + }, + "KeyspaceName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{0,47}" + }, + "KeyspacesCell":{ + "type":"structure", + "members":{ + "value":{ + "shape":"KeyspacesCellValue", + "documentation":"

        The value stored in this cell, which can be of various data types supported by Amazon Keyspaces.

        " + }, + "metadata":{ + "shape":"KeyspacesMetadata", + "documentation":"

        Metadata associated with this cell, such as time-to-live (TTL) expiration time and write timestamp.

        " + } + }, + "documentation":"

        Represents a cell in an Amazon Keyspaces table, containing both the value and metadata about the cell.

        " + }, + "KeyspacesCellList":{ + "type":"list", + "member":{"shape":"KeyspacesCell"} + }, + "KeyspacesCellMap":{ + "type":"list", + "member":{"shape":"KeyspacesCellMapDefinition"} + }, + "KeyspacesCellMapDefinition":{ + "type":"structure", + "members":{ + "key":{ + "shape":"KeyspacesCellValue", + "documentation":"

        The key of this map entry in the Amazon Keyspaces cell.

        " + }, + "value":{ + "shape":"KeyspacesCellValue", + "documentation":"

        The value associated with the key in this map entry.

        " + }, + "metadata":{ + "shape":"KeyspacesMetadata", + "documentation":"

        Metadata for this specific key-value pair within the map, such as timestamps and TTL information.

        " + } + }, + "documentation":"

        Represents a key-value pair within a map data type in Amazon Keyspaces, including the associated metadata.

        " + }, + "KeyspacesCellValue":{ + "type":"structure", + "members":{ + "asciiT":{ + "shape":"String", + "documentation":"

        A value of ASCII text type, containing US-ASCII characters.

        " + }, + "bigintT":{ + "shape":"String", + "documentation":"

        A 64-bit signed integer value.

        " + }, + "blobT":{ + "shape":"Blob", + "documentation":"

        A binary large object (BLOB) value stored as a Base64-encoded string.

        " + }, + "boolT":{ + "shape":"Boolean", + "documentation":"

        A Boolean value, either true or false.

        " + }, + "counterT":{ + "shape":"String", + "documentation":"

        A distributed counter value that can be incremented and decremented.

        " + }, + "dateT":{ + "shape":"String", + "documentation":"

        A date value without a time component, represented as days since epoch (January 1, 1970).

        " + }, + "decimalT":{ + "shape":"String", + "documentation":"

        A variable-precision decimal number value.

        " + }, + "doubleT":{ + "shape":"String", + "documentation":"

        A 64-bit double-precision floating point value.

        " + }, + "floatT":{ + "shape":"String", + "documentation":"

        A 32-bit single-precision floating point value.

        " + }, + "inetT":{ + "shape":"String", + "documentation":"

        An IP address value, either IPv4 or IPv6 format.

        " + }, + "intT":{ + "shape":"String", + "documentation":"

        A 32-bit signed integer value.

        " + }, + "listT":{ + "shape":"KeyspacesCellList", + "documentation":"

        An ordered collection of elements that can contain duplicate values.

        " + }, + "mapT":{ + "shape":"KeyspacesCellMap", + "documentation":"

        A collection of key-value pairs where each key is unique.

        " + }, + "setT":{ + "shape":"KeyspacesCellList", + "documentation":"

        An unordered collection of unique elements.

        " + }, + "smallintT":{ + "shape":"String", + "documentation":"

        A 16-bit signed integer value.

        " + }, + "textT":{ + "shape":"String", + "documentation":"

        A UTF-8 encoded string value.

        " + }, + "timeT":{ + "shape":"String", + "documentation":"

        A time value without a date component, with nanosecond precision.

        " + }, + "timestampT":{ + "shape":"String", + "documentation":"

        A timestamp value representing date and time with millisecond precision.

        " + }, + "timeuuidT":{ + "shape":"String", + "documentation":"

        A universally unique identifier (UUID) that includes a timestamp component, ensuring both uniqueness and time ordering.

        " + }, + "tinyintT":{ + "shape":"String", + "documentation":"

        An 8-bit signed integer value.

        " + }, + "tupleT":{ + "shape":"KeyspacesCellList", + "documentation":"

        A fixed-length ordered list of elements, where each element can be of a different data type.

        " + }, + "uuidT":{ + "shape":"String", + "documentation":"

        A universally unique identifier (UUID) value.

        " + }, + "varcharT":{ + "shape":"String", + "documentation":"

        A UTF-8 encoded string value, functionally equivalent to text type.

        " + }, + "varintT":{ + "shape":"String", + "documentation":"

        A variable precision integer value with arbitrary length.

        " + }, + "udtT":{ + "shape":"KeyspacesUdtMap", + "documentation":"

        A user-defined type (UDT) value consisting of named fields, each with its own data type.

        " + } + }, + "documentation":"

        Represents the value of a cell in an Amazon Keyspaces table, supporting various data types with type-specific fields.

        ", + "union":true + }, + "KeyspacesCells":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"KeyspacesCell"} + }, + "KeyspacesKeysMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"KeyspacesCellValue"} + }, + "KeyspacesMetadata":{ + "type":"structure", + "members":{ + "expirationTime":{ + "shape":"String", + "documentation":"

        The time at which the associated data will expire, based on the time-to-live (TTL) setting.

        " + }, + "writeTime":{ + "shape":"String", + "documentation":"

        The timestamp at which the associated data was written to the database.

        " + } + }, + "documentation":"

        Contains metadata information associated with Amazon Keyspaces cells and rows.

        " + }, + "KeyspacesRow":{ + "type":"structure", + "members":{ + "valueCells":{ + "shape":"KeyspacesCells", + "documentation":"

        A map of regular (non-static) column cells in the row, where keys are column names and values are the corresponding cells.

        " + }, + "staticCells":{ + "shape":"KeyspacesCells", + "documentation":"

        A map of static column cells shared by all rows with the same partition key, where keys are column names and values are the corresponding cells.

        " + }, + "rowMetadata":{ + "shape":"KeyspacesMetadata", + "documentation":"

        Metadata that applies to the entire row, such as timestamps and TTL information.

        " + } + }, + "documentation":"

        Represents a row in an Amazon Keyspaces table, containing regular column values, static column values, and row-level metadata.

        " + }, + "KeyspacesUdtMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"KeyspacesCell"} + }, + "ListStreamsInput":{ + "type":"structure", + "members":{ + "keyspaceName":{ + "shape":"KeyspaceName", + "documentation":"

        The name of the keyspace for which to list streams. If specified, only streams associated with tables in this keyspace are returned. If omitted, streams from all keyspaces are included in the results.

        " + }, + "tableName":{ + "shape":"TableName", + "documentation":"

        The name of the table for which to list streams. Must be used together with keyspaceName. If specified, only streams associated with this specific table are returned.

        " + }, + "maxResults":{ + "shape":"ListStreamsInputMaxResultsInteger", + "documentation":"

        The maximum number of streams to return in a single ListStreams request. Default value is 100. The minimum value is 1 and the maximum value is 100.

        " + }, + "nextToken":{ + "shape":"StreamArnToken", + "documentation":"

        An optional pagination token provided by a previous ListStreams operation. If this parameter is specified, the response includes only records beyond the token, up to the value specified by maxResults.

        " + } + } + }, + "ListStreamsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListStreamsOutput":{ + "type":"structure", + "members":{ + "streams":{ + "shape":"StreamList", + "documentation":"

        An array of stream objects, each containing summary information about a stream including its ARN, status, and associated table information. This list includes all streams that match the request criteria.

        " + }, + "nextToken":{ + "shape":"StreamArnToken", + "documentation":"

        A pagination token that can be used in a subsequent ListStreams request. This token is returned if the response contains more streams than can be returned in a single response based on the MaxResults parameter.

        " + } + } + }, + "OriginType":{ + "type":"string", + "enum":[ + "USER", + "REPLICATION", + "TTL" + ] + }, + "Record":{ + "type":"structure", + "members":{ + "eventVersion":{ + "shape":"String", + "documentation":"

        The version of the record format, used to track the evolution of the record structure over time.

        " + }, + "createdAt":{ + "shape":"Date", + "documentation":"

        The timestamp indicating when this change data capture record was created.

        " + }, + "origin":{ + "shape":"OriginType", + "documentation":"

        The origin or source of this change data capture record.

        " + }, + "partitionKeys":{ + "shape":"KeyspacesKeysMap", + "documentation":"

        The partition key columns and their values for the affected row.

        " + }, + "clusteringKeys":{ + "shape":"KeyspacesKeysMap", + "documentation":"

        The clustering key columns and their values for the affected row, which determine the order of rows within a partition.

        " + }, + "newImage":{ + "shape":"KeyspacesRow", + "documentation":"

        The state of the row after the change operation that generated this record.

        " + }, + "oldImage":{ + "shape":"KeyspacesRow", + "documentation":"

        The state of the row before the change operation that generated this record.

        " + }, + "sequenceNumber":{ + "shape":"SequenceNumber", + "documentation":"

        A unique identifier assigned to this record within the shard, used for ordering and tracking purposes.

        " + } + }, + "documentation":"

        Represents a change data capture record for a row in an Amazon Keyspaces table, containing both the new and old states of the row.

        " + }, + "RecordList":{ + "type":"list", + "member":{"shape":"Record"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

        The requested resource wasn't found. Verify that the resource exists and try again.

        " + } + }, + "documentation":"

        The requested resource doesn't exist or could not be found.

        This exception occurs when you attempt to access a keyspace, table, stream, or other Amazon Keyspaces resource that doesn't exist or that has been deleted. Verify that the resource identifier is correct and that the resource exists in your account.

        ", + "exception":true + }, + "SequenceNumber":{ + "type":"string", + "max":48, + "min":21 + }, + "SequenceNumberRange":{ + "type":"structure", + "members":{ + "startingSequenceNumber":{ + "shape":"SequenceNumber", + "documentation":"

        The starting sequence number of the range.

        " + }, + "endingSequenceNumber":{ + "shape":"SequenceNumber", + "documentation":"

        The ending sequence number of the range, which may be null for open-ended ranges.

        " + } + }, + "documentation":"

        Defines a range of sequence numbers within a change data capture stream's shard for Amazon Keyspaces.

        " + }, + "Shard":{ + "type":"structure", + "members":{ + "shardId":{ + "shape":"ShardId", + "documentation":"

        A unique identifier for this shard within the stream.

        " + }, + "sequenceNumberRange":{ + "shape":"SequenceNumberRange", + "documentation":"

        The range of sequence numbers contained within this shard.

        " + }, + "parentShardIds":{ + "shape":"ShardIdList", + "documentation":"

        The identifiers of parent shards that this shard evolved from, if this shard was created through resharding.

        " + } + }, + "documentation":"

        Represents a uniquely identified group of change records within a change data capture stream for Amazon Keyspaces.

        " + }, + "ShardDescriptionList":{ + "type":"list", + "member":{"shape":"Shard"} + }, + "ShardFilter":{ + "type":"structure", + "members":{ + "type":{ + "shape":"ShardFilterType", + "documentation":"

        The type of shard filter to use, which determines how the shardId parameter is interpreted.

        " + }, + "shardId":{ + "shape":"ShardId", + "documentation":"

        The identifier of a specific shard used to filter results based on the specified filter type.

        " + } + }, + "documentation":"

        A filter used to limit the shards returned by a GetStream operation.

        " + }, + "ShardFilterType":{ + "type":"string", + "enum":["CHILD_SHARDS"] + }, + "ShardId":{ + "type":"string", + "max":65, + "min":28 + }, + "ShardIdList":{ + "type":"list", + "member":{"shape":"ShardId"} + }, + "ShardIdToken":{ + "type":"string", + "max":3000, + "min":80 + }, + "ShardIterator":{ + "type":"string", + "max":4096, + "min":1 + }, + "ShardIteratorType":{ + "type":"string", + "enum":[ + "TRIM_HORIZON", + "LATEST", + "AT_SEQUENCE_NUMBER", + "AFTER_SEQUENCE_NUMBER" + ] + }, + "Stream":{ + "type":"structure", + "required":[ + "streamArn", + "keyspaceName", + "tableName", + "streamLabel" + ], + "members":{ + "streamArn":{ + "shape":"StreamArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies this stream.

        " + }, + "keyspaceName":{ + "shape":"KeyspaceName", + "documentation":"

        The name of the keyspace containing the table associated with this stream.

        " + }, + "tableName":{ + "shape":"TableName", + "documentation":"

        The name of the table associated with this stream.

        " + }, + "streamLabel":{ + "shape":"String", + "documentation":"

        A unique identifier for this stream that can be used in stream operations.

        " + } + }, + "documentation":"

        Represents a change data capture stream for an Amazon Keyspaces table, which enables tracking and processing of data changes.

        " + }, + "StreamArn":{ + "type":"string", + "max":1024, + "min":37 + }, + "StreamArnToken":{ + "type":"string", + "max":3000, + "min":80 + }, + "StreamList":{ + "type":"list", + "member":{"shape":"Stream"} + }, + "StreamStatus":{ + "type":"string", + "enum":[ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED" + ] + }, + "StreamViewType":{ + "type":"string", + "enum":[ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY" + ] + }, + "String":{"type":"string"}, + "TableName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_]{0,47}" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

        The request was denied due to request throttling. Reduce the frequency of requests and try again.

        " + } + }, + "documentation":"

        The request rate is too high and exceeds the service's throughput limits.

        This exception occurs when you send too many requests in a short period of time. Implement exponential backoff in your retry strategy to handle this exception. Reducing your request frequency or distributing requests more evenly can help avoid throughput exceptions.

        ", + "exception":true + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

        The input fails to satisfy the constraints specified by the service. Check the error details and modify your request.

        " + }, + "errorCode":{ + "shape":"ValidationExceptionType", + "documentation":"

        An error occurred validating your request. See the error message for details.

        " + } + }, + "documentation":"

        The request validation failed because one or more input parameters failed validation.

        This exception occurs when there are syntax errors in the request, field constraints are violated, or required parameters are missing. To help you fix the issue, the exception message provides details about which parameter failed and why.

        ", + "exception":true + }, + "ValidationExceptionType":{ + "type":"string", + "enum":[ + "InvalidFormat", + "TrimmedDataAccess", + "ExpiredIterator", + "ExpiredNextToken" + ] + } + }, + "documentation":"

        Amazon Keyspaces (for Apache Cassandra) change data capture (CDC) records change events for Amazon Keyspaces tables. The change events captured in a stream are time-ordered and de-duplicated write operations. Using stream data you can build event driven applications that incorporate near-real time change events from Amazon Keyspaces tables.

        Amazon Keyspaces CDC is serverless and scales the infrastructure for change events automatically based on the volume of changes on your table.

        This API reference describes the Amazon Keyspaces CDC stream API in detail.

        For more information about Amazon Keyspaces CDC, see Working with change data capture (CDC) streams in Amazon Keyspaces in the Amazon Keyspaces Developer Guide.

        To learn how Amazon Keyspaces CDC API actions are recorded with CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer Guide.

        To see the metrics Amazon Keyspaces CDC sends to Amazon CloudWatch, see Amazon Keyspaces change data capture (CDC) CloudWatch metrics in the Amazon Keyspaces Developer Guide.

        " +} diff --git a/services/kinesis/pom.xml b/services/kinesis/pom.xml index 16748776ae91..01a8461ce41a 100644 --- a/services/kinesis/pom.xml +++ b/services/kinesis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesis AWS Java SDK :: Services :: Amazon Kinesis diff --git a/services/kinesis/src/main/resources/codegen-resources/customization.config b/services/kinesis/src/main/resources/codegen-resources/customization.config index 4b4a9752a841..df57fdb8aefb 100644 --- a/services/kinesis/src/main/resources/codegen-resources/customization.config +++ b/services/kinesis/src/main/resources/codegen-resources/customization.config @@ -52,6 +52,5 @@ "Invalid ARN: Kinesis ARNs only support stream arn types": "Test is broken for client tests, need operationInputs.", "RegionMismatch: client region should be used for endpoint region": "Test is broken for client tests, need operationInputs." }, - "usePriorKnowledgeForH2": true, - "enableFastUnmarshaller": true + "usePriorKnowledgeForH2": true } diff --git a/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/KinesisExceptionTest.java b/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/KinesisExceptionTest.java new file mode 100644 index 000000000000..2c52ee033bcc --- /dev/null +++ b/services/kinesis/src/test/java/software/amazon/awssdk/services/kinesis/KinesisExceptionTest.java @@ -0,0 +1,103 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.kinesis; + +import com.github.tomakehurst.wiremock.WireMockServer; +import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.core.WireMockConfiguration; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; +import software.amazon.awssdk.services.kinesis.model.InvalidArgumentException; +import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; + +import java.net.URI; + +import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class KinesisExceptionTest { + private static final Logger logger = LoggerFactory.getLogger(KinesisExceptionTest.class); + private WireMockServer wireMock; + private KinesisClient client; + + @Before + public void setup() { + wireMock = new WireMockServer(WireMockConfiguration.wireMockConfig().dynamicPort()); + wireMock.start(); + + client = KinesisClient.builder() + .endpointOverride(URI.create(wireMock.baseUrl())) + .region(Region.US_WEST_2) + .credentialsProvider(StaticCredentialsProvider.create( + AwsBasicCredentials.create("fake-key", "fake-secret"))) + .build(); + } + + @After + public void tearDown() { + wireMock.stop(); + } + + @Test + public void testInvalidArgumentException() { + wireMock.stubFor(WireMock.post(WireMock.urlPathEqualTo("/")) + .willReturn(WireMock.aResponse() + .withStatus(400) + .withHeader("x-amzn-ErrorType", "InvalidArgumentException") + 
.withHeader("Content-Type", "application/json"))); + + GetRecordsRequest request = GetRecordsRequest.builder() + .shardIterator("Invalid-Shard-Iterator") + .build(); + + assertThatThrownBy(() -> client.getRecords(request)) + .isInstanceOf(InvalidArgumentException.class) + .satisfies(e -> { + InvalidArgumentException exception = (InvalidArgumentException) e; + assertThat(exception.statusCode()).isEqualTo(400); + assertThat(exception.awsErrorDetails().errorCode()).isEqualTo("InvalidArgumentException"); + }); + } + + + @Test + public void testResourceNotFoundException() { + wireMock.stubFor(WireMock.post(WireMock.urlPathEqualTo("/")) + .willReturn(WireMock.aResponse() + .withStatus(400) + .withHeader("x-amzn-ErrorType", "ResourceNotFoundException"))); + + GetRecordsRequest request = GetRecordsRequest.builder() + .shardIterator("NonExistent-Shard-Iterator") + .build(); + + assertThatThrownBy(() -> client.getRecords(request)) + .isInstanceOf(ResourceNotFoundException.class) + .satisfies(e -> { + ResourceNotFoundException exception = (ResourceNotFoundException) e; + assertThat(exception.statusCode()).isEqualTo(400); + assertThat(exception.awsErrorDetails().errorCode()).isEqualTo("ResourceNotFoundException"); + }); + } +} diff --git a/services/kinesisanalytics/pom.xml b/services/kinesisanalytics/pom.xml index 4e0585f897af..526ba9d6f0de 100644 --- a/services/kinesisanalytics/pom.xml +++ b/services/kinesisanalytics/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisanalytics AWS Java SDK :: Services :: Amazon Kinesis Analytics diff --git a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config index 42b702f5d098..2faadebcd541 100644 --- a/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisanalytics/src/main/resources/codegen-resources/customization.config 
@@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "discoverInputSchema" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisanalyticsv2/pom.xml b/services/kinesisanalyticsv2/pom.xml index c7608461b1ce..adfc5ed34fe7 100644 --- a/services/kinesisanalyticsv2/pom.xml +++ b/services/kinesisanalyticsv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisanalyticsv2 AWS Java SDK :: Services :: Kinesis Analytics V2 diff --git a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config index 8bd5b51b0231..e5f468b10579 100644 --- a/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisanalyticsv2/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listApplications" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisvideo/pom.xml b/services/kinesisvideo/pom.xml index 953a5b9795cb..16baaea11711 100644 --- a/services/kinesisvideo/pom.xml +++ b/services/kinesisvideo/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 kinesisvideo diff --git a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config index 02d1ae27ebcc..c501ccd5d7ac 100644 --- a/services/kinesisvideo/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideo/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "listTagsForStream", "describeStream" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisvideoarchivedmedia/pom.xml b/services/kinesisvideoarchivedmedia/pom.xml index 55e4a191ec48..5498b8a56d0f 100644 --- a/services/kinesisvideoarchivedmedia/pom.xml +++ b/services/kinesisvideoarchivedmedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisvideoarchivedmedia AWS Java SDK :: Services :: Kinesis Video Archived Media diff --git a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config index c39009a834f4..9a222fcf67a0 100644 --- a/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideoarchivedmedia/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "getHLSStreamingSessionURL" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisvideomedia/pom.xml b/services/kinesisvideomedia/pom.xml index fa9383073361..bcedc0ffe521 100644 --- a/services/kinesisvideomedia/pom.xml +++ b/services/kinesisvideomedia/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisvideomedia AWS Java SDK :: Services :: Kinesis Video Media diff --git a/services/kinesisvideomedia/src/main/resources/codegen-resources/customization.config b/services/kinesisvideomedia/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/kinesisvideomedia/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideomedia/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisvideosignaling/pom.xml b/services/kinesisvideosignaling/pom.xml index 8a80d6ebb38a..4b9a3c7b11b9 100644 --- a/services/kinesisvideosignaling/pom.xml +++ b/services/kinesisvideosignaling/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisvideosignaling AWS Java SDK :: Services :: Kinesis Video Signaling diff --git a/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config b/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideosignaling/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kinesisvideowebrtcstorage/pom.xml b/services/kinesisvideowebrtcstorage/pom.xml index 665b74b053c5..be89de301eea 100644 --- a/services/kinesisvideowebrtcstorage/pom.xml +++ b/services/kinesisvideowebrtcstorage/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kinesisvideowebrtcstorage AWS Java SDK :: Services :: Kinesis Video Web RTC Storage diff --git a/services/kinesisvideowebrtcstorage/src/main/resources/codegen-resources/customization.config b/services/kinesisvideowebrtcstorage/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/kinesisvideowebrtcstorage/src/main/resources/codegen-resources/customization.config +++ b/services/kinesisvideowebrtcstorage/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } 
diff --git a/services/kms/pom.xml b/services/kms/pom.xml index ac3c34b67bfb..72c775fffa62 100644 --- a/services/kms/pom.xml +++ b/services/kms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT kms AWS Java SDK :: Services :: AWS KMS diff --git a/services/kms/src/main/resources/codegen-resources/customization.config b/services/kms/src/main/resources/codegen-resources/customization.config index 15fb90a0c17c..5ad7080232ca 100644 --- a/services/kms/src/main/resources/codegen-resources/customization.config +++ b/services/kms/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "listAliases", "listKeys" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/kms/src/main/resources/codegen-resources/service-2.json b/services/kms/src/main/resources/codegen-resources/service-2.json index 8e4c88b3ee10..a4629390211a 100644 --- a/services/kms/src/main/resources/codegen-resources/service-2.json +++ b/services/kms/src/main/resources/codegen-resources/service-2.json @@ -30,7 +30,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

        For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CancelKeyDeletion (key policy)

        Related operations: ScheduleKeyDeletion

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Cancels the deletion of a KMS key. When this operation succeeds, the key state of the KMS key is Disabled. To enable the KMS key, use EnableKey.

        For more information about scheduling and canceling deletion of a KMS key, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CancelKeyDeletion (key policy)

        Related operations: ScheduleKeyDeletion

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ConnectCustomKeyStore":{ "name":"ConnectCustomKeyStore", @@ -47,7 +47,7 @@ {"shape":"KMSInternalException"}, {"shape":"CloudHsmClusterInvalidConfigurationException"} ], - "documentation":"

        Connects or reconnects a custom key store to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. For an external key store, ConnectCustomKeyStore connects the key store to the external key store proxy that communicates with your external key manager.

        The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

        The connection process for a custom key store can take an extended amount of time to complete. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

        CloudHSM key store

        During the connection process for an CloudHSM key store, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

        To connect an CloudHSM key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

        If you are having trouble connecting or disconnecting a CloudHSM key store, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        External key store

        When you connect an external key store that uses public endpoint connectivity, KMS tests its ability to communicate with your external key manager by sending a request via the external key store proxy.

        When you connect to an external key store that uses VPC endpoint service connectivity, KMS establishes the networking elements that it needs to communicate with your external key manager via the external key store proxy. This includes creating an interface endpoint to the VPC endpoint service and a private hosted zone for traffic between KMS and the VPC endpoint service.

        To connect an external key store, KMS must be able to connect to the external key store proxy, the external key store proxy must be able to communicate with your external key manager, and the external key manager must be available for cryptographic operations.

        If you are having trouble connecting or disconnecting an external key store, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:ConnectCustomKeyStore (IAM policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Connects or reconnects a custom key store to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore connects the key store to its associated CloudHSM cluster. For an external key store, ConnectCustomKeyStore connects the key store to the external key store proxy that communicates with your external key manager.

        The custom key store must be connected before you can create KMS keys in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key store at any time.

        The connection process for a custom key store can take an extended amount of time to complete. This operation starts the connection process, but it does not wait for it to complete. When it succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no properties. However, this response does not indicate that the custom key store is connected. To get the connection state of the custom key store, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The ConnectCustomKeyStore operation might fail for various reasons. To find the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        To fix the failure, use the DisconnectCustomKeyStore operation to disconnect the custom key store, correct the error, use the UpdateCustomKeyStore operation if necessary, and then use ConnectCustomKeyStore again.

        CloudHSM key store

        During the connection process for an CloudHSM key store, KMS finds the CloudHSM cluster that is associated with the custom key store, creates the connection infrastructure, connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates its password.

        To connect an CloudHSM key store, its associated CloudHSM cluster must have at least one active HSM. To get the number of active HSMs in a cluster, use the DescribeClusters operation. To add HSMs to the cluster, use the CreateHsm operation. Also, the kmsuser crypto user (CU) must not be logged into the cluster. This prevents KMS from using this account to log in.

        If you are having trouble connecting or disconnecting a CloudHSM key store, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        External key store

        When you connect an external key store that uses public endpoint connectivity, KMS tests its ability to communicate with your external key manager by sending a request via the external key store proxy.

        When you connect to an external key store that uses VPC endpoint service connectivity, KMS establishes the networking elements that it needs to communicate with your external key manager via the external key store proxy. This includes creating an interface endpoint to the VPC endpoint service and a private hosted zone for traffic between KMS and the VPC endpoint service.

        To connect an external key store, KMS must be able to connect to the external key store proxy, the external key store proxy must be able to communicate with your external key manager, and the external key manager must be available for cryptographic operations.

        If you are having trouble connecting or disconnecting an external key store, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:ConnectCustomKeyStore (IAM policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "CreateAlias":{ "name":"CreateAlias", @@ -65,7 +65,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Creates a friendly name for a KMS key.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

        You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

        The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Using aliases in the Key Management Service Developer Guide.

        This operation does not return a response. To get the alias that you created, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a friendly name for a KMS key.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        You can use an alias to identify a KMS key in the KMS console, in the DescribeKey operation and in cryptographic operations, such as Encrypt and GenerateDataKey. You can also change the KMS key that's associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any time. These operations don't affect the underlying KMS key.

        You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.

        The alias must be unique in the account and Region, but you can have aliases with the same name in different Regions. For detailed information about aliases, see Aliases in KMS in the Key Management Service Developer Guide.

        This operation does not return a response. To get the alias that you created, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "CreateCustomKeyStore":{ "name":"CreateCustomKeyStore", @@ -94,7 +94,7 @@ {"shape":"XksProxyInvalidResponseException"}, {"shape":"XksProxyInvalidConfigurationException"} ], - "documentation":"

        Creates a custom key store backed by a key store that you own and manage. When you use a KMS key in a custom key store for a cryptographic operation, the cryptographic operation is actually performed in your key store using your keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key store proxy and external key manager outside of Amazon Web Services.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        Before you create the custom key store, the required elements must be in place and operational. We recommend that you use the test tools that KMS provides to verify the configuration your external key store proxy. For details about the required elements and verification tests, see Assemble the prerequisites (for CloudHSM key stores) or Assemble the prerequisites (for external key stores) in the Key Management Service Developer Guide.

        To create a custom key store, use the following parameters.

        • To create an CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for CloudHSM key stores. If you include it, set it to the default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        • To create an external key store, specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For help with failures, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        For external key stores:

        Some external key managers provide a simpler method for creating an external key store. For details, see your external key manager documentation.

        When creating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot use a proxy configuration with the CreateCustomKeyStore operation. However, you can use the values in the file to help you determine the correct values for the CreateCustomKeyStore parameters.

        When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to its CloudHSM cluster, or to connect a new external key store to the external key store proxy for your external key manager. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

        For help with failures, see Troubleshooting a custom key store in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:CreateCustomKeyStore (IAM policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a custom key store backed by a key store that you own and manage. When you use a KMS key in a custom key store for a cryptographic operation, the cryptographic operation is actually performed in your key store using your keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key store proxy and external key manager outside of Amazon Web Services.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        Before you create the custom key store, the required elements must be in place and operational. We recommend that you use the test tools that KMS provides to verify the configuration your external key store proxy. For details about the required elements and verification tests, see Assemble the prerequisites (for CloudHSM key stores) or Assemble the prerequisites (for external key stores) in the Key Management Service Developer Guide.

        To create a custom key store, use the following parameters.

        • To create an CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId, KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter is optional for CloudHSM key stores. If you include it, set it to the default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting an CloudHSM key store in the Key Management Service Developer Guide.

        • To create an external key store, specify the CustomKeyStoreName and a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity, XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath. If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the XksProxyVpcEndpointServiceName parameter. For help with failures, see Troubleshooting an external key store in the Key Management Service Developer Guide.

        For external key stores:

        Some external key managers provide a simpler method for creating an external key store. For details, see your external key manager documentation.

        When creating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot use a proxy configuration with the CreateCustomKeyStore operation. However, you can use the values in the file to help you determine the correct values for the CreateCustomKeyStore parameters.

        When the operation completes successfully, it returns the ID of the new custom key store. Before you can use your new custom key store, you need to use the ConnectCustomKeyStore operation to connect a new CloudHSM key store to its CloudHSM cluster, or to connect a new external key store to the external key store proxy for your external key manager. Even if you are not going to use your custom key store immediately, you might want to connect it to verify that all settings are correct and then disconnect it until you are ready to use it.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:CreateCustomKeyStore (IAM policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "CreateGrant":{ "name":"CreateGrant", @@ -115,7 +115,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Adds a grant to a KMS key.

        A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        The CreateGrant operation returns a GrantToken and a GrantId.

        • When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. Once the grant has achieved eventual consistency, the grantee principal can use the permissions in the grant without identifying the grant.

          However, to use the permissions in the grant immediately, use the GrantToken that CreateGrant returns. For details, see Using a grant token in the Key Management Service Developer Guide .

        • The CreateGrant operation also returns a GrantId. You can use the GrantId and a key identifier to identify the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, use the ListGrants or ListRetirableGrants operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:CreateGrant (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Adds a grant to a KMS key.

        A grant is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (DescribeKey) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for temporary permissions because you can create one, use its permissions, and delete it without changing your key policies or IAM policies.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        The CreateGrant operation returns a GrantToken and a GrantId.

        • When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. Once the grant has achieved eventual consistency, the grantee principal can use the permissions in the grant without identifying the grant.

          However, to use the permissions in the grant immediately, use the GrantToken that CreateGrant returns. For details, see Using a grant token in the Key Management Service Developer Guide .

        • The CreateGrant operation also returns a GrantId. You can use the GrantId and a key identifier to identify the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, use the ListGrants or ListRetirableGrants operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:CreateGrant (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "CreateKey":{ "name":"CreateKey", @@ -140,7 +140,7 @@ {"shape":"XksKeyAlreadyInUseException"}, {"shape":"XksKeyNotFoundException"} ], - "documentation":"

        Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

        A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide

        Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

        KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        To create different types of KMS keys, use the following guidance:

        Symmetric encryption KMS key

        By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

        To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

        If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

        Asymmetric KMS keys

        To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

        Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        HMAC KMS key

        To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

        HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

        Multi-Region primary keys
        Imported key material

        To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

        You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

        You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

        To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        Custom key store

        A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

        KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

        To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

        To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

        To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources.

        A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state.

        Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties.

        KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        To create different types of KMS keys, use the following guidance:

        Symmetric encryption KMS key

        By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance.

        To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material.

        If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair.

        Asymmetric KMS keys

        To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created.

        Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, ML-DSA key pair or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with ML-DSA key pairs can be used to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        HMAC KMS key

        To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created.

        HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

        Multi-Region primary keys
        Imported key material

        To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation.

        You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide .

        You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store.

        To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material step 1. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        Custom key store

        A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys.

        KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation.

        To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store.

        To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region.

        To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key.

        Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation.

        Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account.

        Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Decrypt":{ "name":"Decrypt", @@ -163,7 +163,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

        You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

        Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using &IAM; policies. Otherwise, you might create an &IAM; policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

        Decrypt also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call Decrypt for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of the plaintext data, the response includes the plaintext data encrypted with the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in a different Amazon Web Services account, specify the key ARN or the alias ARN of the KMS key.

        Required permissions: kms:Decrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Decrypts ciphertext that was encrypted by a KMS key using any of the following operations:

        You can use this operation to decrypt ciphertext that was encrypted under a symmetric encryption KMS key or an asymmetric encryption KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption algorithm that was used to encrypt the ciphertext. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the public key in an KMS asymmetric KMS key. However, it cannot decrypt symmetric ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        If the ciphertext was encrypted under a symmetric encryption KMS key, the KeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the KMS key is always recommended as a best practice. When you use the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the Decrypt operation fails. This practice ensures that you use the KMS key that you intend.

        Whenever possible, use key policies to give users permission to call the Decrypt operation on a particular KMS key, instead of using IAM policies. Otherwise, you might create an IAM policy that gives the user Decrypt permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy for Decrypt permissions, limit the user to particular KMS keys or particular trusted accounts. For details, see Best practices for IAM policies in the Key Management Service Developer Guide.

        Decrypt also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call Decrypt for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of the plaintext data, the response includes the plaintext data encrypted with the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. If you use the KeyId parameter to identify a KMS key in a different Amazon Web Services account, specify the key ARN or the alias ARN of the KMS key.

        Required permissions: kms:Decrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteAlias":{ "name":"DeleteAlias", @@ -178,7 +178,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Deletes the specified alias.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

        Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes the specified alias.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys, use the ListAliases operation.

        Each KMS key can have multiple aliases. To change the alias of a KMS key, use DeleteAlias to delete the current alias and CreateAlias to create a new alias. To associate an existing alias with a different KMS key, call UpdateAlias.

        Cross-account use: No. You cannot perform this operation on an alias in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteCustomKeyStore":{ "name":"DeleteCustomKeyStore", @@ -194,7 +194,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Deletes a custom key store. This operation does not affect any backing elements of the custom key store. It does not delete the CloudHSM cluster that is associated with an CloudHSM key store, or affect any users or keys in the cluster. For an external key store, it does not affect the external key store proxy, external key manager, or any external keys.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the required waiting period expires and all KMS keys are deleted from the custom key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

        For keys in an CloudHSM key store, the ScheduleKeyDeletion operation makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. KMS never creates, manages, or deletes cryptographic keys in the external key manager associated with an external key store. You must manage them using your external key manager tools.

        Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to disconnect the custom key store from its backing key store. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DeleteCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes a custom key store. This operation does not affect any backing elements of the custom key store. It does not delete the CloudHSM cluster that is associated with an CloudHSM key store, or affect any users or keys in the cluster. For an external key store, it does not affect the external key store proxy, external key manager, or any external keys.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        The custom key store that you delete cannot contain any KMS keys. Before deleting the key store, verify that you will never need to use any of the KMS keys in the key store for any cryptographic operations. Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. After the required waiting period expires and all KMS keys are deleted from the custom key store, use DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can delete the custom key store.

        For keys in an CloudHSM key store, the ScheduleKeyDeletion operation makes a best effort to delete the key material from the associated cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. KMS never creates, manages, or deletes cryptographic keys in the external key manager associated with an external key store. You must manage them using your external key manager tools.

        Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore operation to disconnect the custom key store from its backing key store. While the key store is disconnected, you cannot create or use the KMS keys in the key store. But, you do not need to delete KMS keys and you can reconnect a disconnected custom key store at any time.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DeleteCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeleteImportedKeyMaterial":{ "name":"DeleteImportedKeyMaterial", @@ -203,6 +203,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteImportedKeyMaterialRequest"}, + "output":{"shape":"DeleteImportedKeyMaterialResponse"}, "errors":[ {"shape":"InvalidArnException"}, {"shape":"UnsupportedOperationException"}, @@ -211,7 +212,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

        When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DeleteImportedKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes key material that was previously imported. This operation makes the specified KMS key temporarily unusable. To restore the usability of the KMS key, reimport the same key material. For more information about importing key material into KMS, see Importing Key Material in the Key Management Service Developer Guide.

        When the specified KMS key is in the PendingDeletion state, this operation does not change the KMS key's state. Otherwise, it changes the KMS key's state to PendingImport.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DeleteImportedKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DeriveSharedSecret":{ "name":"DeriveSharedSecret", @@ -232,7 +233,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Derives a shared secret using a key agreement algorithm.

        You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

        DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key.

        The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret.

        1. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT.

          The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

        2. Bob creates an elliptic curve key pair.

          Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice.

        3. Alice and Bob exchange their public keys through an insecure communication channel (like the internet).

          Use GetPublicKey to download the public key of your asymmetric KMS key pair.

          KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret.

        4. Alice calls DeriveSharedSecret.

          KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret.

        5. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key.

        To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DeriveSharedSecret (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Derives a shared secret using a key agreement algorithm.

        You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

        DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key.

        The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret.

        1. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT.

          The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

        2. Bob creates an elliptic curve key pair.

          Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice.

        3. Alice and Bob exchange their public keys through an insecure communication channel (like the internet).

          Use GetPublicKey to download the public key of your asymmetric KMS key pair.

          KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret.

        4. Alice calls DeriveSharedSecret.

          KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret.

        5. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key.

        To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DeriveSharedSecret (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DescribeCustomKeyStores":{ "name":"DescribeCustomKeyStores", @@ -247,7 +248,7 @@ {"shape":"InvalidMarkerException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Gets information about custom key stores in the account and Region.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

        To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For an CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled.

        For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DescribeCustomKeyStores (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets information about custom key stores in the account and Region.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both).

        To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.

        Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For an CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled.

        For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DescribeCustomKeyStores (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DescribeKey":{ "name":"DescribeKey", @@ -263,7 +264,7 @@ {"shape":"DependencyTimeoutException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

        This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports.

        For multi-Region keys, DescribeKey displays the primary key and all related replica keys. For KMS keys in CloudHSM key stores, it includes information about the key store, such as the key store ID and the CloudHSM cluster ID. For KMS keys in external key stores, it includes the custom key store ID and the ID of the external key.

        DescribeKey does not return the following information:

        • Aliases associated with the KMS key. To get this information, use ListAliases.

        • Whether automatic key rotation is enabled on the KMS key. To get this information, use GetKeyRotationStatus. Also, some key states prevent a KMS key from being automatically rotated. For details, see How Automatic Key Rotation Works in the Key Management Service Developer Guide.

        • Tags on the KMS key. To get this information, use ListResourceTags.

        • Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

        In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DescribeKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Provides detailed information about a KMS key. You can run DescribeKey on a customer managed key or an Amazon Web Services managed key.

        This detailed information includes the key ARN, creation date (and deletion date, if applicable), the key state, and the origin and expiration date (if any) of the key material. It includes fields, like KeySpec, that help you distinguish different types of KMS keys. It also displays the key usage (encryption, signing, or generating and verifying MACs) and the algorithms that the KMS key supports.

        For multi-Region keys, DescribeKey displays the primary key and all related replica keys. For KMS keys in CloudHSM key stores, it includes information about the key store, such as the key store ID and the CloudHSM cluster ID. For KMS keys in external key stores, it includes the custom key store ID and the ID of the external key.

        DescribeKey does not return the following information:

        • Aliases associated with the KMS key. To get this information, use ListAliases.

        • Whether automatic key rotation is enabled on the KMS key. To get this information, use GetKeyRotationStatus. Also, some key states prevent a KMS key from being automatically rotated. For details, see How key rotation works in the Key Management Service Developer Guide.

        • Tags on the KMS key. To get this information, use ListResourceTags.

        • Key policies and grants on the KMS key. To get this information, use GetKeyPolicy and ListGrants.

        In general, DescribeKey is a non-mutating operation. It returns data about KMS keys, but doesn't change them. However, Amazon Web Services services use DescribeKey to create Amazon Web Services managed keys from a predefined Amazon Web Services alias with no key ID.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:DescribeKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisableKey":{ "name":"DisableKey", @@ -279,7 +280,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

        For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKey (key policy)

        Related operations: EnableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Sets the state of a KMS key to disabled. This change temporarily prevents use of the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For more information about how key state affects the use of a KMS key, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKey (key policy)

        Related operations: EnableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisableKeyRotation":{ "name":"DisableKeyRotation", @@ -297,7 +298,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Disables automatic rotation of the key material of the specified symmetric encryption KMS key.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material every year. Rotation of Amazon Web Services owned KMS keys varies.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:DisableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "DisconnectCustomKeyStore":{ "name":"DisconnectCustomKeyStore", @@ -312,7 +313,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Disconnects the custom key store from its backing key store. This operation disconnects an CloudHSM key store from its associated CloudHSM cluster or disconnects an external key store from the external key store proxy that communicates with your external key manager.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use its KMS keys. You can reconnect the custom key store at any time.

        While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

        When you disconnect a custom key store, its ConnectionState changes to Disconnected. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Disconnects the custom key store from its backing key store. This operation disconnects an CloudHSM key store from its associated CloudHSM cluster or disconnects an external key store from the external key store proxy that communicates with your external key manager.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        While a custom key store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use its KMS keys. You can reconnect the custom key store at any time.

        While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in cryptographic operations will fail. This action can prevent users from storing and accessing sensitive data.

        When you disconnect a custom key store, its ConnectionState changes to Disconnected. To find the connection state of a custom key store, use the DescribeCustomKeyStores operation. To reconnect a custom key store, use the ConnectCustomKeyStore operation.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:DisconnectCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "EnableKey":{ "name":"EnableKey", @@ -329,7 +330,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKey (key policy)

        Related operations: DisableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Sets the key state of a KMS key to enabled. This allows you to use the KMS key for cryptographic operations.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKey (key policy)

        Related operations: DisableKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "EnableKeyRotation":{ "name":"EnableKeyRotation", @@ -347,7 +348,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

        By default, when you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can use the optional RotationPeriodInDays parameter to specify a custom rotation period when you enable key rotation, or you can use RotationPeriodInDays to modify the rotation period of a key that you previously enabled automatic key rotation on.

        You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus operation to identify any in progress rotations. You can use the ListKeyRotations operation to view the details of completed rotations.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

        New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

        Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Enables automatic rotation of the key material of the specified symmetric encryption KMS key.

        By default, when you enable automatic rotation of a customer managed KMS key, KMS rotates the key material of the KMS key one year (approximately 365 days) from the enable date and every year thereafter. You can use the optional RotationPeriodInDays parameter to specify a custom rotation period when you enable key rotation, or you can use RotationPeriodInDays to modify the rotation period of a key that you previously enabled automatic key rotation on.

        You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable rotation of the key material in a customer managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus operation to identify any in progress rotations. You can use the ListKeyRotations operation to view the details of completed rotations.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You cannot enable or disable automatic rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years (approximately 1,095 days) to every year (approximately 365 days).

        New Amazon Web Services managed keys are automatically rotated one year after they are created, and approximately every year thereafter.

        Existing Amazon Web Services managed keys are automatically rotated one year after their most recent rotation, and every year thereafter.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:EnableKeyRotation (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Encrypt":{ "name":"Encrypt", @@ -368,7 +369,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

        You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

        If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key spec.

        When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

        You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

        • Symmetric encryption KMS keys

          • SYMMETRIC_DEFAULT: 4096 bytes

        • RSA_2048

          • RSAES_OAEP_SHA_1: 214 bytes

          • RSAES_OAEP_SHA_256: 190 bytes

        • RSA_3072

          • RSAES_OAEP_SHA_1: 342 bytes

          • RSAES_OAEP_SHA_256: 318 bytes

        • RSA_4096

          • RSAES_OAEP_SHA_1: 470 bytes

          • RSAES_OAEP_SHA_256: 446 bytes

        • SM2PKE: 1024 bytes (China Regions only)

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Encrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT.

        You can use this operation to encrypt small amounts of arbitrary data, such as a personal identifier or database password, or other sensitive information. You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations return a plaintext data key and an encrypted copy of that data key.

        If you use a symmetric encryption KMS key, you can use an encryption context to add additional security to your encryption operation. If you specify an EncryptionContext when encrypting data, you must specify the same encryption context (a case-sensitive exact match) when decrypting the data. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        If you specify an asymmetric KMS key, you must also specify the encryption algorithm. The algorithm must be compatible with the KMS key spec.

        When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

        You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The maximum size of the data that you can encrypt varies with the type of KMS key and the encryption algorithm that you choose.

        • Symmetric encryption KMS keys

          • SYMMETRIC_DEFAULT: 4096 bytes

        • RSA_2048

          • RSAES_OAEP_SHA_1: 214 bytes

          • RSAES_OAEP_SHA_256: 190 bytes

        • RSA_3072

          • RSAES_OAEP_SHA_1: 342 bytes

          • RSAES_OAEP_SHA_256: 318 bytes

        • RSA_4096

          • RSAES_OAEP_SHA_1: 470 bytes

          • RSAES_OAEP_SHA_256: 446 bytes

        • SM2PKE: 1024 bytes (China Regions only)

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Encrypt (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateDataKey":{ "name":"GenerateDataKey", @@ -389,7 +390,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

        To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        GenerateDataKey also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKey for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKey returns a copy of the data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the data key, the response includes a copy of the data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        How to use your data key

        We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

        To encrypt data outside of KMS:

        1. Use the GenerateDataKey operation to get a data key.

        2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

        3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

        To decrypt data outside of KMS:

        1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

        2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a plaintext copy of the data key and a copy that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the plaintext key are random; they are not related to the caller or the KMS key. You can use the plaintext key to encrypt your data outside of KMS and store the encrypted data key with the encrypted data.

        To generate a data key, specify the symmetric encryption KMS key that will be used to encrypt the data key. You cannot use an asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get a cryptographically secure random byte string, use GenerateRandom.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        GenerateDataKey also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKey for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKey returns a copy of the data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the data key, the response includes a copy of the data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        How to use your data key

        We recommend that you use the following pattern to encrypt data locally in your application. You can write your own code or use a client-side encryption library, such as the Amazon Web Services Encryption SDK, the Amazon DynamoDB Encryption Client, or Amazon S3 client-side encryption to do these tasks for you.

        To encrypt data outside of KMS:

        1. Use the GenerateDataKey operation to get a data key.

        2. Use the plaintext data key (in the Plaintext field of the response) to encrypt your data outside of KMS. Then erase the plaintext data key from memory.

        3. Store the encrypted data key (in the CiphertextBlob field of the response) with the encrypted data.

        To decrypt data outside of KMS:

        1. Use the Decrypt operation to decrypt the encrypted data key. The operation returns a plaintext copy of the data key.

        2. Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext data key from memory.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKey (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateDataKeyPair":{ "name":"GenerateDataKeyPair", @@ -411,7 +412,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

        GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

        GenerateDataKeyPair also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKeyPair for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKeyPair returns the public data key and a copy of the private data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the private data key (PrivateKeyPlaintext), the response includes a copy of the private data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPair (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key, a plaintext private key, and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. You can use the data key pair to perform asymmetric cryptography and implement digital signatures outside of KMS. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPair returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        If you are using the data key pair to encrypt data, or for any operation where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an encrypted private key, but omits the plaintext private key that you need only to decrypt ciphertext or sign a message. Later, when you need to decrypt the data or sign a message, use the Decrypt operation to decrypt the encrypted private key in the data key pair.

        GenerateDataKeyPair returns a unique data key pair for each request. The bytes in the keys are random; they are not related to the caller or the KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280. The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC 5958.

        GenerateDataKeyPair also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateDataKeyPair for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. GenerateDataKeyPair returns the public data key and a copy of the private data key encrypted under the specified KMS key, as usual. But instead of a plaintext copy of the private data key (PrivateKeyPlaintext), the response includes a copy of the private data key encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPair (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateDataKeyPairWithoutPlaintext":{ "name":"GenerateDataKeyPairWithoutPlaintext", @@ -433,7 +434,7 @@ {"shape":"UnsupportedOperationException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique asymmetric data key pair for use outside of KMS. This operation returns a plaintext public key and a copy of the private key that is encrypted under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, this operation does not return a plaintext private key. The bytes in the keys are random; they are not related to the caller or to the KMS key that is used to encrypt the private key.

        You can use the public key that GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key with the data. When you are ready to decrypt data or sign a message, you can use the Decrypt operation to decrypt the encrypted private key.

        To generate a data key pair, you must specify a symmetric encryption KMS key to encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key store. To get the type and origin of your KMS key, use the DescribeKey operation.

        Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key pairs for either encryption or signing, but not both. However, KMS cannot enforce any restrictions on the use of data key pairs outside of KMS.

        GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateDataKeyWithoutPlaintext":{ "name":"GenerateDataKeyWithoutPlaintext", @@ -454,7 +455,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

        GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

        This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

        It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

        To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

        To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate an SM4 data key (China Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a unique symmetric data key for use outside of KMS. This operation returns a data key that is encrypted under a symmetric encryption KMS key that you specify. The bytes in the key are random; they are not related to the caller or to the KMS key.

        GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation except that it does not return a plaintext copy of the data key.

        This operation is useful for systems that need to encrypt data at some point, but not immediately. When you need to encrypt the data, you call the Decrypt operation on the encrypted copy of the key.

        It's also useful in distributed systems with different levels of trust. For example, you might store encrypted data in containers. One component of your system creates new containers and stores an encrypted data key with each container. Then, a different component puts the data into the containers. That component first decrypts the data key, uses the plaintext data key to encrypt data, puts the encrypted data into the container, and then destroys the plaintext data key. In this system, the component that creates the containers never sees the plaintext data key.

        To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operations.

        To generate a data key, you must specify the symmetric encryption KMS key that is used to encrypt the data key. You cannot use an asymmetric KMS key or a key in a custom key store to generate a data key. To get the type of your KMS key, use the DescribeKey operation.

        You must also specify the length of the data key. Use either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter.

        To generate an SM4 data key (China Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 16. The symmetric encryption key used in China Regions to encrypt your data key is an SM4 encryption key.

        If the operation succeeds, you will find the encrypted copy of the data key in the CiphertextBlob field.

        You can use an optional encryption context to add additional security to the encryption operation. If you specify an EncryptionContext, you must specify the same encryption context (a case-sensitive exact match) when decrypting the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. For more information, see Encryption Context in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateDataKeyWithoutPlaintext (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateMac":{ "name":"GenerateMac", @@ -474,7 +475,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        You can use the value that GenerateMac returns in the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. You can also use the raw result to implement HMAC-based algorithms such as key derivation functions. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateMac (key policy)

        Related operations: VerifyMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Generates a hash-based message authentication code (HMAC) for a message using an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        You can use the value that GenerateMac returns in the VerifyMac operation to demonstrate that the original message has not changed. Also, because a secret key is used to create the hash, you can verify that the party that generated the hash has the required secret key. You can also use the raw result to implement HMAC-based algorithms such as key derivation functions. This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        Best practices recommend that you limit the time during which any signing mechanism, including an HMAC, is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. HMAC tags do not include a timestamp, but you can include a timestamp in the token or message to help you detect when it's time to refresh the HMAC.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GenerateMac (key policy)

        Related operations: VerifyMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GenerateRandom":{ "name":"GenerateRandom", @@ -491,7 +492,7 @@ {"shape":"CustomKeyStoreNotFoundException"}, {"shape":"CustomKeyStoreInvalidStateException"} ], - "documentation":"

        Returns a random byte string that is cryptographically secure.

        You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

        By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster associated with a CloudHSM key store, use the CustomKeyStoreId parameter.

        GenerateRandom also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateRandom for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of plaintext bytes, the response includes the plaintext bytes encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        For more information about entropy and random number generation, see Key Management Service Cryptographic Details.

        Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

        Required permissions: kms:GenerateRandom (IAM policy)

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns a random byte string that is cryptographically secure.

        You must use the NumberOfBytes parameter to specify the length of the random byte string. There is no default value for string length.

        By default, the random byte string is generated in KMS. To generate the byte string in the CloudHSM cluster associated with a CloudHSM key store, use the CustomKeyStoreId parameter.

        GenerateRandom also supports Amazon Web Services Nitro Enclaves, which provide an isolated compute environment in Amazon EC2. To call GenerateRandom for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. Use the Recipient parameter to provide the attestation document for the enclave. Instead of plaintext bytes, the response includes the plaintext bytes encrypted under the public key from the attestation document (CiphertextForRecipient). For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        For more information about entropy and random number generation, see Entropy and random number generation in the Key Management Service Developer Guide.

        Cross-account use: Not applicable. GenerateRandom does not use any account-specific resources, such as KMS keys.

        Required permissions: kms:GenerateRandom (IAM policy)

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GetKeyPolicy":{ "name":"GetKeyPolicy", @@ -508,7 +509,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets a key policy attached to the specified KMS key.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetKeyPolicy (key policy)

        Related operations: PutKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a key policy attached to the specified KMS key.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetKeyPolicy (key policy)

        Related operations: PutKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GetKeyRotationStatus":{ "name":"GetKeyRotationStatus", @@ -526,7 +527,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Provides detailed information about the rotation status for a KMS key, including whether automatic rotation of the key material is enabled for the specified KMS key, the rotation period, and the next scheduled rotation date.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

        You can perform on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. You can use GetKeyRotationStatus to identify the date and time that an in progress on-demand rotation was initiated. You can use ListKeyRotations to view the details of completed rotations.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        • Disabled: The key rotation status does not change when you disable a KMS key. However, while the KMS key is disabled, KMS does not rotate the key material. When you re-enable the KMS key, rotation resumes. If the key material in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it immediately, and every year thereafter. If it's been less than a year since the key material in the re-enabled KMS key was rotated, the KMS key resumes its prior rotation schedule.

        • Pending deletion: While a KMS key is pending deletion, its key rotation status is false and KMS does not rotate the key material. If you cancel the deletion, the original key rotation status returns to true.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:GetKeyRotationStatus (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Provides detailed information about the rotation status for a KMS key, including whether automatic rotation of the key material is enabled for the specified KMS key, the rotation period, and the next scheduled rotation date.

        Automatic key rotation is supported only on symmetric encryption KMS keys. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation) of the key material in customer managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys is not configurable. KMS always rotates the key material in Amazon Web Services managed KMS keys every year. The key rotation status for Amazon Web Services managed KMS keys is always true.

        You can perform on-demand (RotateKeyOnDemand) rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. You can use GetKeyRotationStatus to identify the date and time that an in progress on-demand rotation was initiated. You can use ListKeyRotations to view the details of completed rotations.

        In May 2022, KMS changed the rotation schedule for Amazon Web Services managed keys from every three years to every year. For details, see EnableKeyRotation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        • Disabled: The key rotation status does not change when you disable a KMS key. However, while the KMS key is disabled, KMS does not rotate the key material. When you re-enable the KMS key, rotation resumes. If the key material in the re-enabled KMS key hasn't been rotated in one year, KMS rotates it immediately, and every year thereafter. If it's been less than a year since the key material in the re-enabled KMS key was rotated, the KMS key resumes its prior rotation schedule.

        • Pending deletion: While a KMS key is pending deletion, its key rotation status is false and KMS does not rotate the key material. If you cancel the deletion, the original key rotation status returns to true.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:GetKeyRotationStatus (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GetParametersForImport":{ "name":"GetParametersForImport", @@ -544,7 +545,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Returns the public key and an import token you need to import or reimport key material for a KMS key.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

        Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted.

        GetParametersForImport returns the items that you need to import your key material.

        • The public key (or \"wrapping key\") of an RSA key pair that KMS generates.

          You will use this public key to encrypt (\"wrap\") your key material while it's in transit to KMS.

        • An import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.

        The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

        GetParametersForImport requires the following information:

        • The key ID of the KMS key for which you are importing the key material.

        • The key spec of the public key (\"wrapping key\") that you will use to encrypt your key material during import.

        • The wrapping algorithm that you will use with the public key to encrypt your key material.

        You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetParametersForImport (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns the public key and an import token you need to import or reimport key material for a KMS key.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key.

        Before calling GetParametersForImport, use the CreateKey operation with an Origin value of EXTERNAL to create a KMS key with no key material. You can import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store. You can also use GetParametersForImport to get a public key and import token to reimport the original key material into a KMS key whose key material expired or was deleted.

        GetParametersForImport returns the items that you need to import your key material.

        • The public key (or \"wrapping key\") of an RSA key pair that KMS generates.

          You will use this public key to encrypt (\"wrap\") your key material while it's in transit to KMS.

        • An import token that ensures that KMS can decrypt your key material and associate it with the correct KMS key.

        The public key and its import token are permanently linked and must be used together. Each public key and import token set is valid for 24 hours. The expiration date and time appear in the ParametersValidTo field in the GetParametersForImport response. You cannot use an expired public key or import token in an ImportKeyMaterial request. If your key and token expire, send another GetParametersForImport request.

        GetParametersForImport requires the following information:

        • The key ID of the KMS key for which you are importing the key material.

        • The key spec of the public key (\"wrapping key\") that you will use to encrypt your key material during import.

        • The wrapping algorithm that you will use with the public key to encrypt your key material.

        You can use the same or a different public key spec and wrapping algorithm each time you import or reimport the same key material.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:GetParametersForImport (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "GetPublicKey":{ "name":"GetPublicKey", @@ -566,7 +567,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

        To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

        • KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.

        • KeyUsage: Whether the key is used for encryption, signing, or deriving a shared secret.

        • EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key.

        Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GetPublicKey (key policy)

        Related operations: CreateKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS.

        To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including:

        Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:GetPublicKey (key policy)

        Related operations: CreateKey

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ImportKeyMaterial":{ "name":"ImportKeyMaterial", @@ -588,7 +589,7 @@ {"shape":"ExpiredImportTokenException"}, {"shape":"InvalidImportTokenException"} ], - "documentation":"

        Imports or reimports key material into an existing KMS key that was created without key material. ImportKeyMaterial also sets the expiration model and expiration date of the imported key material.

        By default, KMS keys are created with key material that KMS generates. This operation supports Importing key material, an advanced feature that lets you generate and import the cryptographic key material for a KMS key. For more information about importing key material into KMS, see Importing key material in the Key Management Service Developer Guide.

        After you successfully import key material into a KMS key, you can reimport the same key material into that KMS key, but you cannot import different key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration model or expiration date of the key material.

        Each time you import key material into KMS, you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To change the expiration of your key material, you must import it again, either by calling ImportKeyMaterial or using the import features of the KMS console.

        Before calling ImportKeyMaterial:

        • Create or identify a KMS key with no key material. The KMS key must have an Origin value of EXTERNAL, which indicates that the KMS key is designed for imported key material.

          To create a new KMS key for imported key material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store.

        • Use the DescribeKey operation to verify that the KeyState of the KMS key is PendingImport, which indicates that the KMS key has no key material.

          If you are reimporting the same key material into an existing KMS key, you might need to call the DeleteImportedKeyMaterial to delete its existing key material.

        • Call the GetParametersForImport operation to get a public key and import token set for importing key material.

        • Use the public key in the GetParametersForImport response to encrypt your key material.

        Then, in an ImportKeyMaterial request, you submit your encrypted key material and import token. When calling this operation, you must specify the following values:

        • The key ID or key ARN of the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call DescribeKey.

        • The encrypted key material.

        • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

        • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

          If you set an expiration date, KMS deletes the key material from the KMS key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. However, you can delete and reimport the key material at any time, including before the key material expires. Each time you reimport, you can eliminate or reset the expiration time.

        When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic operations.

        If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see How To Import Key Material in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ImportKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Imports or reimports key material into an existing KMS key that was created without key material. You can also use this operation to set or update the expiration model and expiration date of the imported key material.

        By default, KMS creates KMS keys with key material that it generates. You can also generate and import your own key material. For more information about importing key material, see Importing key material.

        For asymmetric, HMAC and multi-Region keys, you cannot change the key material after the initial import. You can import multiple key materials into single-Region, symmetric encryption keys and rotate the key material on demand using RotateKeyOnDemand.

        After you import key material, you can reimport the same key material into that KMS key or, if the key supports on-demand rotation, import new key material. You can use the ImportType parameter to indicate whether you are importing new key material or re-importing previously imported key material. You might reimport key material to replace key material that expired or key material that you deleted. You might also reimport key material to change the expiration model or expiration date of the key material.

        Each time you import key material into KMS, you can determine whether (ExpirationModel) and when (ValidTo) the key material expires. To change the expiration of your key material, you must import it again, either by calling ImportKeyMaterial or using the import features of the KMS console.

        Before you call ImportKeyMaterial, complete these steps:

        • Create or identify a KMS key with EXTERNAL origin, which indicates that the KMS key is designed for imported key material.

          To create a new KMS key for imported key material, call the CreateKey operation with an Origin value of EXTERNAL. You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, asymmetric key agreement key, or asymmetric signing KMS key. You can also import key material into a multi-Region key of any supported type. However, you can't import key material into a KMS key in a custom key store.

        • Call the GetParametersForImport operation to get a public key and import token set for importing key material.

        • Use the public key in the GetParametersForImport response to encrypt your key material.

        Then, in an ImportKeyMaterial request, you submit your encrypted key material and import token. When calling this operation, you must specify the following values:

        • The key ID or key ARN of the KMS key to associate with the imported key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account. To get the Origin and KeyState of a KMS key, call DescribeKey.

        • The encrypted key material.

        • The import token that GetParametersForImport returned. You must use a public key and token from the same GetParametersForImport response.

        • Whether the key material expires (ExpirationModel) and, if so, when (ValidTo). For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

          If you set an expiration date, KMS deletes the key material from the KMS key on the specified date, making the KMS key unusable. To use the KMS key in cryptographic operations again, you must reimport the same key material. However, you can delete and reimport the key material at any time, including before the key material expires. Each time you reimport, you can eliminate or reset the expiration time.

        When this operation is successful, the key state of the KMS key changes from PendingImport to Enabled, and you can use the KMS key in cryptographic operations. For single-Region, symmetric encryption keys, you will need to import all of the key materials associated with the KMS key to change its state to Enabled. Use the ListKeyRotations operation to list the ID and import state of each key material associated with a KMS key.

        If this operation fails, use the exception to help determine the problem. If the error is related to the key material, the import token, or wrapping key, use GetParametersForImport to get a new public key and import token for the KMS key and repeat the import procedure. For help, see Create a KMS key with imported key material in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ImportKeyMaterial (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListAliases":{ "name":"ListAliases", @@ -605,7 +606,7 @@ {"shape":"InvalidArnException"}, {"shape":"NotFoundException"} ], - "documentation":"

        Gets a list of aliases in the caller's Amazon Web Services account and region. For more information about aliases, see CreateAlias.

        By default, the ListAliases operation returns all aliases in the account and region. To get only the aliases associated with a particular KMS key, use the KeyId parameter.

        The ListAliases response can include aliases that you created and associated with your customer managed keys, and aliases that Amazon Web Services created and associated with Amazon Web Services managed keys in your account. You can recognize Amazon Web Services aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

        The response might also include aliases that have no TargetKeyId field. These are predefined aliases that Amazon Web Services has created but has not yet associated with a KMS key. Aliases that Amazon Web Services creates in your account, including predefined aliases, do not count against your KMS aliases quota.

        Cross-account use: No. ListAliases does not return aliases in other Amazon Web Services accounts.

        Required permissions: kms:ListAliases (IAM policy)

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of aliases in the caller's Amazon Web Services account and region. For more information about aliases, see CreateAlias.

        By default, the ListAliases operation returns all aliases in the account and region. To get only the aliases associated with a particular KMS key, use the KeyId parameter.

        The ListAliases response can include aliases that you created and associated with your customer managed keys, and aliases that Amazon Web Services created and associated with Amazon Web Services managed keys in your account. You can recognize Amazon Web Services aliases because their names have the format aws/<service-name>, such as aws/dynamodb.

        The response might also include aliases that have no TargetKeyId field. These are predefined aliases that Amazon Web Services has created but has not yet associated with a KMS key. Aliases that Amazon Web Services creates in your account, including predefined aliases, do not count against your KMS aliases quota.

        Cross-account use: No. ListAliases does not return aliases in other Amazon Web Services accounts.

        Required permissions: kms:ListAliases (IAM policy)

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListGrants":{ "name":"ListGrants", @@ -624,7 +625,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets a list of all grants for the specified KMS key.

        You must specify the KMS key in all requests. You can filter the grant list by grant ID or grantee principal.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an Amazon Web Services service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:ListGrants (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of all grants for the specified KMS key.

        You must specify the KMS key in all requests. You can filter the grant list by grant ID or grantee principal.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        The GranteePrincipal field in the ListGrants response usually contains the user or role designated as the grantee principal in the grant. However, when the grantee principal in the grant is an Amazon Web Services service, the GranteePrincipal field contains the service principal, which might represent several different grantee principals.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:ListGrants (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListKeyPolicies":{ "name":"ListKeyPolicies", @@ -641,7 +642,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Gets the names of the key policies that are attached to a KMS key. This operation is designed to get policy names that you can use in a GetKeyPolicy operation. However, the only valid policy name is default.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyPolicies (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets the names of the key policies that are attached to a KMS key. This operation is designed to get policy names that you can use in a GetKeyPolicy operation. However, the only valid policy name is default.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyPolicies (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListKeyRotations":{ "name":"ListKeyRotations", @@ -659,7 +660,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Returns information about all completed key material rotations for the specified KMS key.

        You must specify the KMS key in all requests. You can refine the key rotations list by limiting the number of rotations returned.

        For detailed information about automatic and on-demand key rotations, see Rotating KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyRotations (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns information about the key materials associated with the specified KMS key. You can use the optional IncludeKeyMaterial parameter to control which key materials are included in the response.

        You must specify the KMS key in all requests. You can refine the key rotations list by limiting the number of rotations returned.

        For detailed information about automatic and on-demand key rotations, see Rotate KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeyRotations (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListKeys":{ "name":"ListKeys", @@ -674,7 +675,7 @@ {"shape":"KMSInternalException"}, {"shape":"InvalidMarkerException"} ], - "documentation":"

        Gets a list of all KMS keys in the caller's Amazon Web Services account and Region.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeys (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Gets a list of all KMS keys in the caller's Amazon Web Services account and Region.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListKeys (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListResourceTags":{ "name":"ListResourceTags", @@ -690,7 +691,7 @@ {"shape":"InvalidArnException"}, {"shape":"InvalidMarkerException"} ], - "documentation":"

        Returns all tags on the specified KMS key.

        For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference. For information about using tags in KMS, see Tagging keys.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListResourceTags (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns all tags on the specified KMS key.

        For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference. For information about using tags in KMS, see Tags in KMS.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ListResourceTags (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ListRetirableGrants":{ "name":"ListRetirableGrants", @@ -707,7 +708,7 @@ {"shape":"NotFoundException"}, {"shape":"KMSInternalException"} ], - "documentation":"

        Returns information about all grants in the Amazon Web Services account and Region that have the specified retiring principal.

        You can specify any principal in your Amazon Web Services account. The grants that are returned include grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: You must specify a principal in your Amazon Web Services account. This operation returns a list of grants where the retiring principal specified in the ListRetirableGrants request is the same retiring principal on the grant. This can include grants on KMS keys owned by other Amazon Web Services accounts, but you do not need kms:ListRetirableGrants permission (or any other additional permission) in any Amazon Web Services account other than your own.

        Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon Web Services account.

        KMS authorizes ListRetirableGrants requests by evaluating the caller account's kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants calls is the retiring principal specified in the request. KMS does not evaluate the caller's permissions to verify their access to any KMS keys or grants that might be returned by the ListRetirableGrants call.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Returns information about all grants in the Amazon Web Services account and Region that have the specified retiring principal.

        You can specify any principal in your Amazon Web Services account. The grants that are returned include grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this operation to determine which grants you may retire. To retire a grant, use the RetireGrant operation.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: You must specify a principal in your Amazon Web Services account. This operation returns a list of grants where the retiring principal specified in the ListRetirableGrants request is the same retiring principal on the grant. This can include grants on KMS keys owned by other Amazon Web Services accounts, but you do not need kms:ListRetirableGrants permission (or any other additional permission) in any Amazon Web Services account other than your own.

        Required permissions: kms:ListRetirableGrants (IAM policy) in your Amazon Web Services account.

        KMS authorizes ListRetirableGrants requests by evaluating the caller account's kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants calls is the retiring principal specified in the request. KMS does not evaluate the caller's permissions to verify their access to any KMS keys or grants that might be returned by the ListRetirableGrants call.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "PutKeyPolicy":{ "name":"PutKeyPolicy", @@ -726,7 +727,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Attaches a key policy to the specified KMS key.

        For more information about key policies, see Key Policies in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide . For examples of adding a key policy in multiple programming languages, see Setting a key policy in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:PutKeyPolicy (key policy)

        Related operations: GetKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Attaches a key policy to the specified KMS key.

        For more information about key policies, see Key Policies in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide . For examples of adding a key policy in multiple programming languages, see Use PutKeyPolicy with an Amazon Web Services SDK or CLI in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:PutKeyPolicy (key policy)

        Related operations: GetKeyPolicy

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ReEncrypt":{ "name":"ReEncrypt", @@ -749,7 +750,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

        The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

        • If your ciphertext was encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to identify the KMS key that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

        • If your ciphertext was encrypted under a symmetric encryption KMS key, the SourceKeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the source KMS key is always recommended as a best practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the ReEncrypt operation fails. This practice ensures that you use the KMS key that you intend.

        • To reencrypt the data, you must use the DestinationKeyId parameter to specify the KMS key that re-encrypts the data after it is decrypted. If the destination KMS key is an asymmetric KMS key, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the KMS key.

          When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

          You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

        Required permissions:

        To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this operation to change the KMS key under which data is encrypted, such as when you manually rotate a KMS key or change the KMS key that protects a ciphertext. You can also use it to reencrypt ciphertext under the same KMS key, such as to change the encryption context of a ciphertext.

        The ReEncrypt operation can decrypt ciphertext that was encrypted by using a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the public key of an asymmetric KMS key outside of KMS. However, it cannot decrypt ciphertext produced by other libraries, such as the Amazon Web Services Encryption SDK or Amazon S3 client-side encryption. These libraries return a ciphertext format that is incompatible with KMS.

        When you use the ReEncrypt operation, you need to provide information for the decrypt operation and the subsequent encrypt operation.

        • If your ciphertext was encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to identify the KMS key that encrypted the ciphertext. You must also supply the encryption algorithm that was used. This information is required to decrypt the data.

        • If your ciphertext was encrypted under a symmetric encryption KMS key, the SourceKeyId parameter is optional. KMS can get this information from metadata that it adds to the symmetric ciphertext blob. This feature adds durability to your implementation by ensuring that authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost track of the key ID. However, specifying the source KMS key is always recommended as a best practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses only the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the ReEncrypt operation fails. This practice ensures that you use the KMS key that you intend.

        • To reencrypt the data, you must use the DestinationKeyId parameter to specify the KMS key that re-encrypts the data after it is decrypted. If the destination KMS key is an asymmetric KMS key, you must also provide the encryption algorithm. The algorithm that you choose must be compatible with the KMS key.

          When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.

          You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric encryption KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than the caller. To specify a KMS key in a different account, you must use its key ARN or alias ARN.

        Required permissions:

        To permit reencryption from or to a KMS key, include the \"kms:ReEncrypt*\" permission in your key policy. This permission is automatically included in the key policy when you use the console to create a KMS key. But you must include it manually when you create a KMS key programmatically or when you use the PutKeyPolicy operation to set a key policy.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ReplicateKey":{ "name":"ReplicateKey", @@ -771,7 +772,7 @@ {"shape":"TagException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Replicates a multi-Region key into the specified Region. This operation creates a multi-Region replica key based on a multi-Region primary key in a different Region of the same Amazon Web Services partition. You can create multiple replicas of a primary key, but each must be in a different Region. To create a multi-Region primary key, use the CreateKey operation.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        A replica key is a fully-functional KMS key that can be used independently of its primary and peer replica keys. A primary key and its replica keys share properties that make them interoperable. They have the same key ID and key material. They also have the same key spec, key usage, key material origin, and automatic key rotation status. KMS automatically synchronizes these shared properties among related multi-Region keys. All other properties of a replica key can differ, including its key policy, tags, aliases, and Key states of KMS keys. KMS pricing and quotas for KMS keys apply to each primary key and replica key.

        When this operation completes, the new replica key has a transient key state of Creating. This key state changes to Enabled (or PendingImport) after a few seconds when the process of creating the new replica key is complete. While the key state is Creating, you can manage the key, but you cannot yet use it in cryptographic operations. If you are creating and using the replica key programmatically, retry on KMSInvalidStateException or call DescribeKey to check its KeyState value before using it. For details about the Creating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        You cannot create more than one replica of a primary key in any Region. If the Region already includes a replica of the key you're trying to replicate, ReplicateKey returns an AlreadyExistsException error. If the key state of the existing replica is PendingDeletion, you can cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be deleted. The new replica key you create will have the same shared properties as the original replica key.

        The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation in the primary key's Region and a CreateKey operation in the replica key's Region.

        If you replicate a multi-Region primary key with imported key material, the replica key is created with no key material. You must import the same key material that you imported into the primary key. For details, see Importing key material into multi-Region keys in the Key Management Service Developer Guide.

        To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.

        ReplicateKey uses different default values for the KeyPolicy and Tags parameters than those used in the KMS console. For details, see the parameter descriptions.

        Cross-account use: No. You cannot use this operation to create a replica key in a different Amazon Web Services account.

        Required permissions:

        • kms:ReplicateKey on the primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:CreateKey in an IAM policy in the replica Region.

        • To use the Tags parameter, kms:TagResource in an IAM policy in the replica Region.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Replicates a multi-Region key into the specified Region. This operation creates a multi-Region replica key based on a multi-Region primary key in a different Region of the same Amazon Web Services partition. You can create multiple replicas of a primary key, but each must be in a different Region. To create a multi-Region primary key, use the CreateKey operation.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        A replica key is a fully-functional KMS key that can be used independently of its primary and peer replica keys. A primary key and its replica keys share properties that make them interoperable. They have the same key ID and key material. They also have the same key spec, key usage, key material origin, and automatic key rotation status. KMS automatically synchronizes these shared properties among related multi-Region keys. All other properties of a replica key can differ, including its key policy, tags, aliases, and key state. KMS pricing and quotas for KMS keys apply to each primary key and replica key.

        When this operation completes, the new replica key has a transient key state of Creating. This key state changes to Enabled (or PendingImport) after a few seconds when the process of creating the new replica key is complete. While the key state is Creating, you can manage the key, but you cannot yet use it in cryptographic operations. If you are creating and using the replica key programmatically, retry on KMSInvalidStateException or call DescribeKey to check its KeyState value before using it. For details about the Creating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        You cannot create more than one replica of a primary key in any Region. If the Region already includes a replica of the key you're trying to replicate, ReplicateKey returns an AlreadyExistsException error. If the key state of the existing replica is PendingDeletion, you can cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be deleted. The new replica key you create will have the same shared properties as the original replica key.

        The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation in the primary key's Region and a CreateKey operation in the replica key's Region.

        If you replicate a multi-Region primary key with imported key material, the replica key is created with no key material. You must import the same key material that you imported into the primary key.

        To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.

        ReplicateKey uses different default values for the KeyPolicy and Tags parameters than those used in the KMS console. For details, see the parameter descriptions.

        Cross-account use: No. You cannot use this operation to create a replica key in a different Amazon Web Services account.

        Required permissions:

        • kms:ReplicateKey on the primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:CreateKey in an IAM policy in the replica Region.

        • To use the Tags parameter, kms:TagResource in an IAM policy in the replica Region.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "RetireGrant":{ "name":"RetireGrant", @@ -790,7 +791,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

        This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

        Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To identify the grant to retire, use a grant token, or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. The CreateGrant operation returns both values.

        This operation can be called by the retiring principal for a grant, by the grantee principal if the grant allows the RetireGrant operation, and by the Amazon Web Services account in which the grant is created. It can also be called by principals to whom permission for retiring a grant is delegated.

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: Yes. You can retire a grant on a KMS key in a different Amazon Web Services account.

        Required permissions: Permission to retire a grant is determined primarily by the grant. For details, see Retiring and revoking grants in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "RevokeGrant":{ "name":"RevokeGrant", @@ -808,7 +809,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide .

        When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide .

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of working with grants in several programming languages, see Programming grants.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:RevokeGrant (key policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes the specified grant. You revoke a grant to terminate the permissions that the grant allows. For more information, see Retiring and revoking grants in the Key Management Service Developer Guide .

        When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as eventual consistency. For details, see Eventual consistency in the Key Management Service Developer Guide .

        For detailed information about grants, including grant terminology, see Grants in KMS in the Key Management Service Developer Guide . For examples of creating grants in several programming languages, see Use CreateGrant with an Amazon Web Services SDK or CLI.

        Cross-account use: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key ARN in the value of the KeyId parameter.

        Required permissions: kms:RevokeGrant (key policy).

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "RotateKeyOnDemand":{ "name":"RotateKeyOnDemand", @@ -829,7 +830,7 @@ {"shape":"LimitExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

        Immediately initiates rotation of the key material of the specified symmetric encryption KMS key.

        You can perform on-demand rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. On-demand rotations do not change existing automatic rotation schedules. For example, consider a KMS key that has automatic key rotation enabled with a rotation period of 730 days. If the key is scheduled to automatically rotate on April 14, 2024, and you perform an on-demand rotation on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024 and every 730 days thereafter.

        You can perform on-demand key rotation a maximum of 10 times per KMS key. You can use the KMS console to view the number of remaining on-demand rotations available for a KMS key.

        You can use GetKeyRotationStatus to identify any in progress on-demand rotations. You can use ListKeyRotations to identify the date that completed on-demand rotations were performed. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

        On-demand key rotation is supported only on symmetric encryption KMS keys. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        You cannot initiate on-demand rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:RotateKeyOnDemand (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Immediately initiates rotation of the key material of the specified symmetric encryption KMS key.

        You can perform on-demand rotation of the key material in customer managed KMS keys, regardless of whether or not automatic key rotation is enabled. On-demand rotations do not change existing automatic rotation schedules. For example, consider a KMS key that has automatic key rotation enabled with a rotation period of 730 days. If the key is scheduled to automatically rotate on April 14, 2024, and you perform an on-demand rotation on April 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024 and every 730 days thereafter.

        You can perform on-demand key rotation a maximum of 10 times per KMS key. You can use the KMS console to view the number of remaining on-demand rotations available for a KMS key.

        You can use GetKeyRotationStatus to identify any in progress on-demand rotations. You can use ListKeyRotations to identify the date that completed on-demand rotations were performed. You can monitor rotation of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.

        On-demand key rotation is supported only on symmetric encryption KMS keys. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, multi-Region KMS keys with imported key material, or KMS keys in a custom key store. When you initiate on-demand key rotation on a symmetric encryption KMS key with imported key material, you must have already imported new key material and that key material's state should be PENDING_ROTATION. Use the ListKeyRotations operation to check the state of all key materials associated with a KMS key. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        You cannot initiate on-demand rotation of Amazon Web Services managed KMS keys. KMS always rotates the key material of Amazon Web Services managed keys every year. Rotation of Amazon Web Services owned KMS keys is managed by the Amazon Web Services service that owns the key.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:RotateKeyOnDemand (key policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "ScheduleKeyDeletion":{ "name":"ScheduleKeyDeletion", @@ -846,7 +847,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

        Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS key without deleting it, use DisableKey.

        You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

        When KMS deletes a KMS key from a CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

        For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ScheduleKeyDeletion (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Schedules the deletion of a KMS key. By default, KMS applies a waiting period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is successful, the key state of the KMS key changes to PendingDeletion and the key can't be used in any cryptographic operations. It remains in this state for the duration of the waiting period. Before the waiting period ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, its key material, and all KMS data associated with it, including all aliases that refer to it.

        Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is a multi-Region replica key, or an asymmetric or HMAC KMS key with imported key material.) To prevent the use of a KMS key without deleting it, use DisableKey.

        You can schedule the deletion of a multi-Region primary key and its replica keys at any time. However, KMS will not delete a multi-Region primary key with existing replica keys. If you schedule the deletion of a primary key with replicas, its key state changes to PendingReplicaDeletion and it cannot be replicated or used in cryptographic operations. This status can continue indefinitely. When the last of its replica keys is deleted (not just scheduled), the key state of the primary key changes to PendingDeletion and its waiting period (PendingWindowInDays) begins. For details, see Deleting multi-Region keys in the Key Management Service Developer Guide.

        When KMS deletes a KMS key from a CloudHSM key store, it makes a best effort to delete the associated key material from the associated CloudHSM cluster. However, you might need to manually delete the orphaned key material from the cluster and its backups. Deleting a KMS key from an external key store has no effect on the associated external key. However, for both types of custom key stores, deleting a KMS key is destructive and irreversible. You cannot decrypt ciphertext encrypted under the KMS key by using only its associated external key or CloudHSM key. Also, you cannot recreate a KMS key in an external key store by creating a new KMS key with the same key material.

        For more information about scheduling a KMS key for deletion, see Deleting KMS keys in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:ScheduleKeyDeletion (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Sign":{ "name":"Sign", @@ -867,7 +868,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA or ECC pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

        To use the Sign operation, provide the following information:

        • Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the DescribeKey operation. The caller must have kms:Sign permission on the KMS key.

        • Use the Message parameter to specify the message or message digest to sign. You can submit messages of up to 4096 bytes. To sign a larger message, generate a hash digest of the message, and then provide the hash digest in the Message parameter. To indicate whether the message is a full message or a digest, use the MessageType parameter.

        • Choose a signing algorithm that is compatible with the KMS key.

        When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

        Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when it's time to refresh the signature.

        To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Sign (key policy)

        Related operations: Verify

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Creates a digital signature for a message or message digest by using the private key in an asymmetric signing KMS key. To verify the signature, use the Verify operation, or use the public key in the same asymmetric KMS key outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA, ECC, or ML-DSA pair that is represented by an asymmetric KMS key. The key owner (or an authorized user) uses their private key to sign a message. Anyone with the public key can verify that the message was signed with that particular private key and that the message hasn't changed since it was signed.

        To use the Sign operation, provide the following information:

        • Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the DescribeKey operation. The caller must have kms:Sign permission on the KMS key.

        • Use the Message parameter to specify the message or message digest to sign. You can submit messages of up to 4096 bytes. To sign a larger message, generate a hash digest of the message, and then provide the hash digest in the Message parameter. To indicate whether the message is a full message, a digest, or an ML-DSA EXTERNAL_MU, use the MessageType parameter.

        • Choose a signing algorithm that is compatible with the KMS key.

        When signing a message, be sure to record the KMS key and the signing algorithm. This information is required to verify the signature.

        Best practices recommend that you limit the time during which any signature is effective. This deters an attack where the actor uses a signed message to establish validity repeatedly or long after the message is superseded. Signatures do not include a timestamp, but you can include a timestamp in the signed message to help you detect when it's time to refresh the signature.

        To verify the signature that this operation generates, use the Verify operation. Or use the GetPublicKey operation to download the public key and then use the public key to verify the signature outside of KMS.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Sign (key policy)

        Related operations: Verify

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "TagResource":{ "name":"TagResource", @@ -884,7 +885,7 @@ {"shape":"LimitExceededException"}, {"shape":"TagException"} ], - "documentation":"

        Adds or edits tags on a customer managed key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

        You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

        You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:TagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Adds or edits tags on a customer managed key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag key and a new tag value.

        You can use this operation to tag a customer managed key, but you cannot tag an Amazon Web Services managed key, an Amazon Web Services owned key, a custom key store, or an alias.

        You can also add tags to a KMS key while creating it (CreateKey) or replicating it (ReplicateKey).

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:TagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UntagResource":{ "name":"UntagResource", @@ -900,7 +901,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"TagException"} ], - "documentation":"

        Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UntagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Deletes tags from a customer managed key. To delete a tag, specify the tag key and the KMS key.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        When it succeeds, the UntagResource operation doesn't return any output. Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return a response. To confirm that the operation worked, use the ListResourceTags operation.

        For information about using tags in KMS, see Tagging keys. For general information about tags, including the format and syntax, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UntagResource (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateAlias":{ "name":"UpdateAlias", @@ -916,7 +917,7 @@ {"shape":"LimitExceededException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        The current and new KMS key must be the same type (both symmetric or both asymmetric or both HMAC), and they must have the same key usage. This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Associates an existing KMS alias with a different KMS key. Each alias is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias and the KMS key must be in the same Amazon Web Services account and Region.

        Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        The current and new KMS key must be the same type (both symmetric or both asymmetric or both HMAC), and they must have the same key usage. This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of KMS key, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.

        Because an alias is not a property of a KMS key, you can create, update, and delete the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all KMS keys in the account, use the ListAliases operation.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions

        For details, see Controlling access to aliases in the Key Management Service Developer Guide.

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateCustomKeyStore":{ "name":"UpdateCustomKeyStore", @@ -945,7 +946,7 @@ {"shape":"XksProxyInvalidResponseException"}, {"shape":"XksProxyInvalidConfigurationException"} ], - "documentation":"

        Changes the properties of a custom key store. You can use this operation to change the properties of a CloudHSM key store or an external key store.

        Use the required CustomKeyStoreId parameter to identify the custom key store. Use the remaining optional parameters to change its properties. This operation does not return any property values. To verify the updated property values, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        When updating the properties of an external key store, verify that the updated settings connect your key store, via the external key store proxy, to the same external key manager as the previous settings, or to a backup or snapshot of the external key manager with the same cryptographic keys. If the updated connection settings fail, you can fix them and retry, although an extended delay might disrupt Amazon Web Services services. However, if KMS permanently loses its access to cryptographic keys, ciphertext encrypted under those keys is unrecoverable.

        For external key stores:

        Some external key managers provide a simpler method for updating an external key store. For details, see your external key manager documentation.

        When updating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot upload the proxy configuration file to the UpdateCustomKeyStore operation. However, you can use the file to help you determine the correct values for the UpdateCustomKeyStore parameters.

        For an CloudHSM key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser crypto user password (KeyStorePassword), or to associate the custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update any property of an CloudHSM key store, the ConnectionState of the CloudHSM key store must be DISCONNECTED.

        For an external key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change to the external key store proxy authentication credentials (XksProxyAuthenticationCredential), connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name (XksProxyVpcEndpointServiceName). To update most properties of an external key store, the ConnectionState of the external key store must be DISCONNECTED. However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store when it is in the CONNECTED or DISCONNECTED state.

        If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore, use the DisconnectCustomKeyStore operation to disconnect the custom key store. After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to reconnect the custom key store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation.

        Before updating the custom key store, verify that the new values allow KMS to connect the custom key store to its backing key store. For example, before you change the XksProxyUriPath value, verify that the external key store proxy is reachable at the new path.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:UpdateCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Changes the properties of a custom key store. You can use this operation to change the properties of a CloudHSM key store or an external key store.

        Use the required CustomKeyStoreId parameter to identify the custom key store. Use the remaining optional parameters to change its properties. This operation does not return any property values. To verify the updated property values, use the DescribeCustomKeyStores operation.

        This operation is part of the custom key stores feature in KMS, which combines the convenience and extensive integration of KMS with the isolation and control of a key store that you own and manage.

        When updating the properties of an external key store, verify that the updated settings connect your key store, via the external key store proxy, to the same external key manager as the previous settings, or to a backup or snapshot of the external key manager with the same cryptographic keys. If the updated connection settings fail, you can fix them and retry, although an extended delay might disrupt Amazon Web Services services. However, if KMS permanently loses its access to cryptographic keys, ciphertext encrypted under those keys is unrecoverable.

        For external key stores:

        Some external key managers provide a simpler method for updating an external key store. For details, see your external key manager documentation.

        When updating an external key store in the KMS console, you can upload a JSON-based proxy configuration file with the desired values. You cannot upload the proxy configuration file to the UpdateCustomKeyStore operation. However, you can use the file to help you determine the correct values for the UpdateCustomKeyStore parameters.

        For an CloudHSM key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), to tell KMS about a change to the kmsuser crypto user password (KeyStorePassword), or to associate the custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId). To update any property of an CloudHSM key store, the ConnectionState of the CloudHSM key store must be DISCONNECTED.

        For an external key store, you can use this operation to change the custom key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change to the external key store proxy authentication credentials (XksProxyAuthenticationCredential), connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint) and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service name (XksProxyVpcEndpointServiceName). To update most properties of an external key store, the ConnectionState of the external key store must be DISCONNECTED. However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential, and XksProxyUriPath of an external key store when it is in the CONNECTED or DISCONNECTED state.

        If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore, use the DisconnectCustomKeyStore operation to disconnect the custom key store. After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore to reconnect the custom key store. To find the ConnectionState of the custom key store, use the DescribeCustomKeyStores operation.

        Before updating the custom key store, verify that the new values allow KMS to connect the custom key store to its backing key store. For example, before you change the XksProxyUriPath value, verify that the external key store proxy is reachable at the new path.

        If the operation succeeds, it returns a JSON object with no properties.

        Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.

        Required permissions: kms:UpdateCustomKeyStore (IAM policy)

        Related operations:

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdateKeyDescription":{ "name":"UpdateKeyDescription", @@ -961,7 +962,7 @@ {"shape":"KMSInternalException"}, {"shape":"KMSInvalidStateException"} ], - "documentation":"

        Updates the description of a KMS key. To see the description of a KMS key, use DescribeKey.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UpdateKeyDescription (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Updates the description of a KMS key. To see the description of a KMS key, use DescribeKey.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.

        Required permissions: kms:UpdateKeyDescription (key policy)

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "UpdatePrimaryRegion":{ "name":"UpdatePrimaryRegion", @@ -978,7 +979,7 @@ {"shape":"NotFoundException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

        Changes the primary key of a multi-Region key.

        This operation changes the replica key in the specified Region to a primary key and changes the former primary key to a replica key. For example, suppose you have a primary key in us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, and the key in us-east-1 becomes a replica key. For details, see Updating the primary Region in the Key Management Service Developer Guide.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        The primary key of a multi-Region key is the source for properties that are always shared by primary and replica keys, including the key material, key ID, key spec, key usage, key material origin, and automatic key rotation. It's the only key that can be replicated. You cannot delete the primary key until all replica keys are deleted.

        The key ID and primary Region that you specify uniquely identify the replica key that will become the primary key. The primary Region must already have a replica key. This operation does not create a KMS key in the specified Region. To find the replica keys, use the DescribeKey operation on the primary key or any replica key. To create a replica key, use the ReplicateKey operation.

        You can run this operation while using the affected multi-Region keys in cryptographic operations. This operation should not delay, interrupt, or cause failures in cryptographic operations.

        Even after this operation completes, the process of updating the primary Region might still be in progress for a few more seconds. Operations such as DescribeKey might display both the old and new primary keys as replicas. The old and new primary keys have a transient key state of Updating. The original key state is restored when the update is complete. While the key state is Updating, you can use the keys in cryptographic operations, but you cannot replicate the new primary key or perform certain management operations, such as enabling or disabling these keys. For details about the Updating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        This operation does not return any output. To verify that primary key is changed, use the DescribeKey operation.

        Cross-account use: No. You cannot use this operation in a different Amazon Web Services account.

        Required permissions:

        • kms:UpdatePrimaryRegion on the current primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:UpdatePrimaryRegion on the current replica key (in the replica key's Region). Include this permission in the replica key's key policy.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Changes the primary key of a multi-Region key.

        This operation changes the replica key in the specified Region to a primary key and changes the former primary key to a replica key. For example, suppose you have a primary key in us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, and the key in us-east-1 becomes a replica key. For details, see Change the primary key in a set of multi-Region keys in the Key Management Service Developer Guide.

        This operation supports multi-Region keys, a KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

        The primary key of a multi-Region key is the source for properties that are always shared by primary and replica keys, including the key material, key ID, key spec, key usage, key material origin, and automatic key rotation. It's the only key that can be replicated. You cannot delete the primary key until all replica keys are deleted.

        The key ID and primary Region that you specify uniquely identify the replica key that will become the primary key. The primary Region must already have a replica key. This operation does not create a KMS key in the specified Region. To find the replica keys, use the DescribeKey operation on the primary key or any replica key. To create a replica key, use the ReplicateKey operation.

        You can run this operation while using the affected multi-Region keys in cryptographic operations. This operation should not delay, interrupt, or cause failures in cryptographic operations.

        Even after this operation completes, the process of updating the primary Region might still be in progress for a few more seconds. Operations such as DescribeKey might display both the old and new primary keys as replicas. The old and new primary keys have a transient key state of Updating. The original key state is restored when the update is complete. While the key state is Updating, you can use the keys in cryptographic operations, but you cannot replicate the new primary key or perform certain management operations, such as enabling or disabling these keys. For details about the Updating key state, see Key states of KMS keys in the Key Management Service Developer Guide.

        This operation does not return any output. To verify that primary key is changed, use the DescribeKey operation.

        Cross-account use: No. You cannot use this operation in a different Amazon Web Services account.

        Required permissions:

        • kms:UpdatePrimaryRegion on the current primary key (in the primary key's Region). Include this permission in the primary key's key policy.

        • kms:UpdatePrimaryRegion on the current replica key (in the replica key's Region). Include this permission in the replica key's key policy.

        Related operations

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "Verify":{ "name":"Verify", @@ -1000,7 +1001,7 @@ {"shape":"KMSInvalidSignatureException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Verifies a digital signature that was generated by the Sign operation.

        Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with a KMSInvalidSignatureException exception.

        A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        To use the Verify operation, specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature. The message type does not need to be the same as the one used for signing, but it must indicate whether the value of the Message parameter should be hashed as part of the verification process.

        You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Verify (key policy)

        Related operations: Sign

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Verifies a digital signature that was generated by the Sign operation.

        Verification confirms that an authorized user signed the message with the specified KMS key and signing algorithm, and the message hasn't changed since it was signed. If the signature is verified, the value of the SignatureValid field in the response is True. If the signature verification fails, the Verify operation fails with a KMSInvalidSignatureException exception.

        A digital signature is generated by using the private key in an asymmetric KMS key. The signature is verified by using the public key in the same asymmetric KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

        To use the Verify operation, specify the same asymmetric KMS key, message, and signing algorithm that were used to produce the signature. The message type does not need to be the same as the one used for signing, but it must indicate whether the value of the Message parameter should be hashed as part of the verification process.

        You can also verify the digital signature by using the public key of the KMS key outside of KMS. Use the GetPublicKey operation to download the public key in the asymmetric KMS key and then use the public key to verify the signature outside of KMS. The advantage of using the Verify operation is that it is performed within KMS. As a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use the KMS key to verify signatures.

        To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:Verify (key policy)

        Related operations: Sign

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " }, "VerifyMac":{ "name":"VerifyMac", @@ -1021,7 +1022,7 @@ {"shape":"KMSInvalidStateException"}, {"shape":"DryRunOperationException"} ], - "documentation":"

        Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails. Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

        HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:VerifyMac (key policy)

        Related operations: GenerateMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " + "documentation":"

        Verifies the hash-based message authentication code (HMAC) for a specified message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and compares the computed HMAC to the HMAC that you specify. If the HMACs are identical, the verification succeeds; otherwise, it fails. Verification indicates that the message hasn't changed since the HMAC was calculated, and the specified key was used to generate and verify the HMAC.

        HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards defined in RFC 2104.

        This operation is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The KMS key that you use for this operation must be in a compatible key state. For details, see Key states of KMS keys in the Key Management Service Developer Guide.

        Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.

        Required permissions: kms:VerifyMac (key policy)

        Related operations: GenerateMac

        Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency.

        " } }, "shapes":{ @@ -1091,6 +1092,18 @@ "max":262144, "min":1 }, + "BackingKeyIdResponseType":{ + "type":"string", + "max":64, + "min":0, + "pattern":"^[a-f0-9]+$" + }, + "BackingKeyIdType":{ + "type":"string", + "max":64, + "min":64, + "pattern":"^[a-f0-9]+$" + }, "BooleanType":{"type":"boolean"}, "CancelKeyDeletionRequest":{ "type":"structure", @@ -1182,8 +1195,7 @@ }, "ConnectCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ConnectionErrorCodeType":{ "type":"string", @@ -1231,7 +1243,7 @@ }, "TargetKeyId":{ "shape":"KeyIdType", - "documentation":"

        Associates the alias with the specified customer managed key. The KMS key must be in the same Amazon Web Services Region.

        A valid key ID is required. If you supply a null or empty string value, this operation returns an error.

        For help finding the key ID and ARN, see Finding the Key ID and ARN in the Key Management Service Developer Guide .

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Associates the alias with the specified customer managed key. The KMS key must be in the same Amazon Web Services Region.

        A valid key ID is required. If you supply a null or empty string value, this operation returns an error.

        For help finding the key ID and ARN, see Find the key ID and key ARN in the Key Management Service Developer Guide .

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -1253,7 +1265,7 @@ }, "KeyStorePassword":{ "shape":"KeyStorePasswordType", - "documentation":"

        Specifies the kmsuser password for an CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

        Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

        The password must be a string of 7 to 32 characters. Its value is case sensitive.

        This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

        " + "documentation":"

        Specifies the kmsuser password for a CloudHSM key store. This parameter is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.

        Enter the password of the kmsuser crypto user (CU) account in the specified CloudHSM cluster. KMS logs into the cluster as this user to manage key material on your behalf.

        The password must be a string of 7 to 32 characters. Its value is case sensitive.

        This parameter tells KMS the kmsuser account password; it does not change the password in the CloudHSM cluster.

        " }, "CustomKeyStoreType":{ "shape":"CustomKeyStoreType", @@ -1273,11 +1285,11 @@ }, "XksProxyAuthenticationCredential":{ "shape":"XksProxyAuthenticationCredentialType", - "documentation":"

        Specifies an authentication credential for the external key store proxy (XKS proxy). This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey. For character requirements, see XksProxyAuthenticationCredentialType.

        KMS uses this authentication credential to sign requests to the external key store proxy on your behalf. This credential is unrelated to Identity and Access Management (IAM) and Amazon Web Services credentials.

        This parameter doesn't set or change the authentication credentials on the XKS proxy. It just tells KMS the credential that you established on your external key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore operation to provide the new credential to KMS.

        " + "documentation":"

        Specifies an authentication credential for the external key store proxy (XKS proxy). This parameter is required for all custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey, a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey. For character requirements, see XksProxyAuthenticationCredentialType.

        KMS uses this authentication credential to sign requests to the external key store proxy on your behalf. This credential is unrelated to Identity and Access Management (IAM) and Amazon Web Services credentials.

        This parameter doesn't set or change the authentication credentials on the XKS proxy. It just tells KMS the credential that you established on your external key store proxy. If you rotate your proxy authentication credential, use the UpdateCustomKeyStore operation to provide the new credential to KMS.

        " }, "XksProxyConnectivity":{ "shape":"XksProxyConnectivityType", - "documentation":"

        Indicates how KMS communicates with the external key store proxy. This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT. If the external key store proxy uses a Amazon VPC endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see Choosing a connectivity option in the Key Management Service Developer Guide.

        An Amazon VPC endpoint service keeps your communication with KMS in a private address space entirely within Amazon Web Services, but it requires more configuration, including establishing a Amazon VPC with multiple subnets, a VPC endpoint service, a network load balancer, and a verified private DNS name. A public endpoint is simpler to set up, but it might be slower and might not fulfill your security requirements. You might consider testing with a public endpoint, and then establishing a VPC endpoint service for production tasks. Note that this choice does not determine the location of the external key store proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within the VPC or outside of Amazon Web Services such as in your corporate data center.

        " + "documentation":"

        Indicates how KMS communicates with the external key store proxy. This parameter is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.

        If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT. If the external key store proxy uses an Amazon VPC endpoint service for communication with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see Choosing a connectivity option in the Key Management Service Developer Guide.

        An Amazon VPC endpoint service keeps your communication with KMS in a private address space entirely within Amazon Web Services, but it requires more configuration, including establishing an Amazon VPC with multiple subnets, a VPC endpoint service, a network load balancer, and a verified private DNS name. A public endpoint is simpler to set up, but it might be slower and might not fulfill your security requirements. You might consider testing with a public endpoint, and then establishing a VPC endpoint service for production tasks. Note that this choice does not determine the location of the external key store proxy. Even if you choose a VPC endpoint service, the proxy can be hosted within the VPC or outside of Amazon Web Services such as in your corporate data center.

        " } } }, @@ -1308,7 +1320,7 @@ }, "RetiringPrincipal":{ "shape":"PrincipalIdType", - "documentation":"

        The principal that has permission to use the RetireGrant operation to retire the grant.

        To specify the principal, use the Amazon Resource Name (ARN) of an Amazon Web Services principal. Valid principals include Amazon Web Services accounts, IAM users, IAM roles, federated users, and assumed role users. For help with the ARN syntax for a principal, see IAM ARNs in the Identity and Access Management User Guide .

        The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants in the Key Management Service Developer Guide.

        " + "documentation":"

        The principal that has permission to use the RetireGrant operation to retire the grant.

        To specify the principal, use the Amazon Resource Name (ARN) of an Amazon Web Services principal. Valid principals include Amazon Web Services accounts, IAM users, IAM roles, federated users, and assumed role users. For help with the ARN syntax for a principal, see IAM ARNs in the Identity and Access Management User Guide .

        The grant determines the retiring principal. Other principals might have permission to retire the grant or revoke the grant. For details, see RevokeGrant and Retiring and revoking grants in the Key Management Service Developer Guide.

        " }, "Operations":{ "shape":"GrantOperationList", @@ -1320,7 +1332,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Name":{ "shape":"GrantNameType", @@ -1328,7 +1340,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1337,7 +1349,7 @@ "members":{ "GrantToken":{ "shape":"GrantTokenType", - "documentation":"

        The grant token.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        The grant token.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "GrantId":{ "shape":"GrantIdType", @@ -1350,7 +1362,7 @@ "members":{ "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

        The key policy size quota is 32 kilobytes (32768 bytes).

        For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If either of the required Resource or Action elements is missing from a key policy statement, the policy statement has no effect. When a key policy statement is missing one of these elements, the KMS console correctly reports an error, but the CreateKey and PutKeyPolicy API requests succeed, even though the policy statement is ineffective.

        For more information on required key policy elements, see Elements in a key policy in the Key Management Service Developer Guide.

        If you do not provide a key policy, KMS attaches a default key policy to the KMS key. For more information, see Default key policy in the Key Management Service Developer Guide.

        If the key policy exceeds the length constraint, KMS returns a LimitExceededException.

        For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " }, "Description":{ "shape":"DescriptionType", @@ -1358,7 +1370,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

        Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

        Select only one valid value.

        • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

        • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

        • For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

        • For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT.

        • For asymmetric KMS keys with ECC_SECG_P256K1 key pairs specify SIGN_VERIFY.

        • For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

        " + "documentation":"

        Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created.

        Select only one valid value.

        • For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.

        • For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

        • For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY.

        • For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT.

        • For asymmetric KMS keys with ECC_SECG_P256K1 key pairs, specify SIGN_VERIFY.

        • For asymmetric KMS keys with ML-DSA key pairs, specify SIGN_VERIFY.

        • For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

        " }, "CustomerMasterKeySpec":{ "shape":"CustomerMasterKeySpec", @@ -1368,7 +1380,7 @@ }, "KeySpec":{ "shape":"KeySpec", - "documentation":"

        Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide .

        The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide .

        Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

        KMS supports the following key specs for KMS keys:

        • Symmetric encryption key (default)

          • SYMMETRIC_DEFAULT

        • HMAC keys (symmetric)

          • HMAC_224

          • HMAC_256

          • HMAC_384

          • HMAC_512

        • Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

          • RSA_2048

          • RSA_3072

          • RSA_4096

        • Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

          • ECC_NIST_P256 (secp256r1)

          • ECC_NIST_P384 (secp384r1)

          • ECC_NIST_P521 (secp521r1)

        • Other asymmetric elliptic curve key pairs (signing and verification)

          • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

        • SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

          • SM2 (China Regions only)

        " + "documentation":"

        Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For a detailed description of all supported key specs, see Key spec reference in the Key Management Service Developer Guide .

        The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm, kms:KeyAgreementAlgorithm, or kms:SigningAlgorithm in the Key Management Service Developer Guide .

        Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys.

        KMS supports the following key specs for KMS keys:

        • Symmetric encryption key (default)

          • SYMMETRIC_DEFAULT

        • HMAC keys (symmetric)

          • HMAC_224

          • HMAC_256

          • HMAC_384

          • HMAC_512

        • Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

          • RSA_2048

          • RSA_3072

          • RSA_4096

        • Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

          • ECC_NIST_P256 (secp256r1)

          • ECC_NIST_P384 (secp384r1)

          • ECC_NIST_P521 (secp521r1)

        • Other asymmetric elliptic curve key pairs (signing and verification)

          • ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.

        • Asymmetric ML-DSA key pairs (signing and verification)

          • ML_DSA_44

          • ML_DSA_65

          • ML_DSA_87

        • SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

          • SM2 (China Regions only)

        " }, "Origin":{ "shape":"OriginType", @@ -1376,7 +1388,7 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

        Creates the KMS key in the specified custom key store. The ConnectionState of the custom key store must be CONNECTED. To find the CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.

        This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

        When you create a KMS key in an CloudHSM key store, KMS generates a non-exportable 256-bit symmetric key in its associated CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you must use the XksKeyId parameter to specify an external key that serves as key material for the KMS key.

        " + "documentation":"

        Creates the KMS key in the specified custom key store. The ConnectionState of the custom key store must be CONNECTED. To find the CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.

        This parameter is valid only for symmetric encryption KMS keys in a single Region. You cannot create any other type of KMS key in a custom key store.

        When you create a KMS key in a CloudHSM key store, KMS generates a non-exportable 256-bit symmetric key in its associated CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you must use the XksKeyId parameter to specify an external key that serves as key material for the KMS key.

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -1384,7 +1396,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

        " + "documentation":"

        Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tags in KMS.

        " }, "MultiRegion":{ "shape":"NullableBooleanType", @@ -1392,7 +1404,7 @@ }, "XksKeyId":{ "shape":"XksKeyIdType", - "documentation":"

        Identifies the external key that serves as key material for the KMS key in an external key store. Specify the ID that the external key store proxy uses to refer to the external key. For help, see the documentation for your external key store proxy.

        This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other Origin value.

        The external key must be an existing 256-bit AES symmetric encryption key hosted outside of Amazon Web Services in an external key manager associated with the external key store specified by the CustomKeyStoreId parameter. This key must be enabled and configured to perform encryption and decryption. Each KMS key in an external key store must use a different external key. For details, see Requirements for a KMS key in an external key store in the Key Management Service Developer Guide.

        Each KMS key in an external key store is associated two backing keys. One is key material that KMS generates. The other is the external key specified by this parameter. When you use the KMS key in an external key store to encrypt data, the encryption operation is performed first by KMS using the KMS key material, and then by the external key manager using the specified external key, a process known as double encryption. For details, see Double encryption in the Key Management Service Developer Guide.

        " + "documentation":"

        Identifies the external key that serves as key material for the KMS key in an external key store. Specify the ID that the external key store proxy uses to refer to the external key. For help, see the documentation for your external key store proxy.

        This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE. It is not valid for KMS keys with any other Origin value.

        The external key must be an existing 256-bit AES symmetric encryption key hosted outside of Amazon Web Services in an external key manager associated with the external key store specified by the CustomKeyStoreId parameter. This key must be enabled and configured to perform encryption and decryption. Each KMS key in an external key store must use a different external key. For details, see Requirements for a KMS key in an external key store in the Key Management Service Developer Guide.

        Each KMS key in an external key store is associated two backing keys. One is key material that KMS generates. The other is the external key specified by this parameter. When you use the KMS key in an external key store to encrypt data, the encryption operation is performed first by KMS using the KMS key material, and then by the external key manager using the specified external key, a process known as double encryption. For details, see Double encryption in the Key Management Service Developer Guide.

        " } } }, @@ -1551,11 +1563,11 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use when decrypting the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "KeyId":{ "shape":"KeyIdType", @@ -1571,7 +1583,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1593,6 +1605,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext data encrypted with the public key in the attestation document.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to decrypt the ciphertext. This field is present only when the operation uses a symmetric encryption KMS key. This field is omitted if the request includes the Recipient parameter.

        " } } }, @@ -1618,8 +1634,7 @@ }, "DeleteCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteImportedKeyMaterialRequest":{ "type":"structure", @@ -1628,6 +1643,23 @@ "KeyId":{ "shape":"KeyIdType", "documentation":"

        Identifies the KMS key from which you are deleting imported key material. The Origin of the KMS key must be EXTERNAL.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the imported key material you are deleting.

        If no KeyMaterialId is specified, KMS deletes the current key material.

        To get the list of key material IDs associated with a KMS key, use ListKeyRotations.

        " + } + } + }, + "DeleteImportedKeyMaterialResponse":{ + "type":"structure", + "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key from which the key material was deleted.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdResponseType", + "documentation":"

        Identifies the deleted key material.

        " } } }, @@ -1662,11 +1694,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -1747,7 +1779,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " } } }, @@ -1781,7 +1813,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable or disable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -1805,8 +1837,7 @@ }, "DisconnectCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DryRunOperationException":{ "type":"structure", @@ -1832,7 +1863,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot enable automatic rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To enable or disable automatic rotation of a set of related multi-Region keys, set the property on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, "RotationPeriodInDays":{ "shape":"RotationPeriodInDaysType", @@ -1857,11 +1888,11 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used to encrypt the data. An encryption context is valid only for cryptographic operations with a symmetric encryption KMS key. The standard asymmetric encryption algorithms and HMAC algorithms that KMS uses do not support an encryption context.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "EncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -1869,7 +1900,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1935,7 +1966,7 @@ "members":{ "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeyId":{ "shape":"KeyIdType", @@ -1943,11 +1974,11 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " + "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), the rule that permits you to use ECC KMS keys only to sign and verify, and the rule that permits you to use ML-DSA key pairs to sign and verify only are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -1955,7 +1986,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -1985,6 +2016,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext private data key encrypted with the public key from the Nitro enclave. This ciphertext can be decrypted only by using a private key in the Nitro enclave.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the private key.

        " } } }, @@ -1997,7 +2032,7 @@ "members":{ "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the private key in the data key pair.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeyId":{ "shape":"KeyIdType", @@ -2005,15 +2040,15 @@ }, "KeyPairSpec":{ "shape":"DataKeyPairSpec", - "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " + "documentation":"

        Determines the type of data key pair that is generated.

        The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), the rule that permits you to use ECC KMS keys only to sign and verify, and the rule that permits you to use ML-DSA key pairs to sign and verify only are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2035,6 +2070,10 @@ "KeyPairSpec":{ "shape":"DataKeyPairSpec", "documentation":"

        The type of data key pair that was generated.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the private key.

        " } } }, @@ -2048,7 +2087,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "NumberOfBytes":{ "shape":"NumberOfBytesType", @@ -2060,7 +2099,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "Recipient":{ "shape":"RecipientInfo", @@ -2068,7 +2107,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2090,6 +2129,10 @@ "CiphertextForRecipient":{ "shape":"CiphertextType", "documentation":"

        The plaintext data key encrypted with the public key from the Nitro enclave. This ciphertext can be decrypted only by using a private key in the Nitro enclave.

        This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the data key. This field is omitted if the request includes the Recipient parameter.

        " } } }, @@ -2103,7 +2146,7 @@ }, "EncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context that will be used when encrypting the data key.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "KeySpec":{ "shape":"DataKeySpec", @@ -2115,11 +2158,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2133,6 +2176,10 @@ "KeyId":{ "shape":"KeyIdType", "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key that encrypted the data key.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to encrypt the data key.

        " } } }, @@ -2158,11 +2205,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -2271,7 +2318,7 @@ }, "OnDemandRotationStartDate":{ "shape":"DateType", - "documentation":"

        Identifies the date and time that an in progress on-demand rotation was initiated.

        The KMS API follows an eventual consistency model due to the distributed nature of the system. As a result, there might be a slight delay between initiating on-demand key rotation and the rotation's completion. Once the on-demand rotation is complete, use ListKeyRotations to view the details of the on-demand rotation.

        " + "documentation":"

        Identifies the date and time that an in progress on-demand rotation was initiated.

        KMS uses a background process to perform rotations. As a result, there might be a slight delay between initiating on-demand key rotation and the rotation's completion. Once the on-demand rotation is complete, KMS removes this field from the response. You can use ListKeyRotations to view the details of the completed on-demand rotation.

        " } } }, @@ -2289,7 +2336,7 @@ }, "WrappingAlgorithm":{ "shape":"AlgorithmSpec", - "documentation":"

        The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.

        For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS.

        The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.

        • RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material.

        • RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.

        • RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm.

        " + "documentation":"

        The algorithm you will use with the RSA public key (PublicKey) in the response to protect your key material during import. For more information, see Select a wrapping algorithm in the Key Management Service Developer Guide.

        For RSA_AES wrapping algorithms, you encrypt your key material with an AES key that you generate, then encrypt your AES key with the RSA public key from KMS. For RSAES wrapping algorithms, you encrypt your key material directly with the RSA public key from KMS.

        The wrapping algorithms that you can use depend on the type of key material that you are importing. To import an RSA private key, you must use an RSA_AES wrapping algorithm.

        • RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key material.

        • RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.

        • RSAES_OAEP_SHA_256 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_256 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_OAEP_SHA_1 — Supported for all types of key material, except RSA key material (private key).

          You cannot use the RSAES_OAEP_SHA_1 wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key material.

        • RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not support the RSAES_PKCS1_V1_5 wrapping algorithm.

        " }, "WrappingKeySpec":{ "shape":"WrappingKeySpec", @@ -2328,7 +2375,7 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " } } }, @@ -2376,14 +2423,14 @@ "members":{ "EncryptionContextSubset":{ "shape":"EncryptionContextType", - "documentation":"

        A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

        " + "documentation":"

        A list of key-value pairs that must be included in the encryption context of the cryptographic operation request. The grant allows the cryptographic operation only when the encryption context in the request includes the key-value pairs specified in this constraint, although it can include additional key-value pairs.

        " }, "EncryptionContextEquals":{ "shape":"EncryptionContextType", - "documentation":"

        A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

        " + "documentation":"

        A list of key-value pairs that must match the encryption context in the cryptographic operation request. The grant allows the operation only when the encryption context in the request is the same as the encryption context specified in this constraint.

        " } }, - "documentation":"

        Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

        KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric KMS keys and management operations, such as DescribeKey or RetireGrant.

        In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

        However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

        To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext: in the Key Management Service Developer Guide .

        " + "documentation":"

        Use this structure to allow cryptographic operations in the grant only when the operation request includes the specified encryption context.

        KMS applies the grant constraints only to cryptographic operations that support an encryption context, that is, all cryptographic operations with a symmetric KMS key. Grant constraints are not applied to operations that do not support an encryption context, such as cryptographic operations with asymmetric KMS keys and management operations, such as DescribeKey or RetireGrant.

        In a cryptographic operation, the encryption context in the decryption operation must be an exact, case-sensitive match for the keys and values in the encryption context of the encryption operation. Only the order of the pairs can vary.

        However, in a grant constraint, the key in each key-value pair is not case sensitive, but the value is case sensitive.

        To avoid confusion, do not use multiple encryption context pairs that differ only by case. To require a fully case-sensitive encryption context, use the kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key policy. For details, see kms:EncryptionContext:context-key in the Key Management Service Developer Guide .

        " }, "GrantIdType":{ "type":"string", @@ -2489,7 +2536,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        The identifier of the KMS key that will be associated with the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL and its KeyState must be PendingImport.

        The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key, including a multi-Region key of any supported type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        The identifier of the KMS key that will be associated with the imported key material. This must be the same KMS key specified in the KeyID parameter of the corresponding GetParametersForImport request. The Origin of the KMS key must be EXTERNAL and its KeyState must be PendingImport.

        The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric encryption KMS key, or asymmetric signing KMS key, including a multi-Region key of any supported type. You cannot perform this operation on a KMS key in a custom key store, or on a KMS key in a different Amazon Web Services account.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, "ImportToken":{ "shape":"CiphertextType", @@ -2505,15 +2552,56 @@ }, "ExpirationModel":{ "shape":"ExpirationModelType", - "documentation":"

        Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

        When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

        You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must reimport the key material.

        " + "documentation":"

        Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES. For help with this choice, see Setting an expiration time in the Key Management Service Developer Guide.

        When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.

        You cannot change the ExpirationModel or ValidTo values for the current import after the request completes. To change either value, you must reimport the key material.

        " + }, + "ImportType":{ + "shape":"ImportType", + "documentation":"

        Indicates whether the key material being imported was previously associated with this KMS key. This parameter is optional and only usable with symmetric encryption keys. If no key material has ever been imported into the KMS key, and this parameter is omitted, the parameter defaults to NEW_KEY_MATERIAL. After the first key material is imported, if this parameter is omitted then the parameter defaults to EXISTING_KEY_MATERIAL.

        " + }, + "KeyMaterialDescription":{ + "shape":"KeyMaterialDescriptionType", + "documentation":"

        Description for the key material being imported. This parameter is optional and only usable with symmetric encryption keys. If you do not specify a key material description, KMS retains the value you specified when you last imported the same key material into this KMS key.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the key material being imported. This parameter is optional and only usable with symmetric encryption keys. You cannot specify a key material ID with ImportType set to NEW_KEY_MATERIAL. Whenever you import key material into a symmetric encryption key, KMS assigns a unique identifier to the key material based on the KMS key ID and the imported key material. When you re-import key material with a specified key material ID, KMS:

        • Computes the identifier for the key material

        • Matches the computed identifier against the specified key material ID

        • Verifies that the key material ID is already associated with the KMS key

        To get the list of key material IDs associated with a KMS key, use ListKeyRotations.

        " } } }, "ImportKeyMaterialResponse":{ "type":"structure", "members":{ + "KeyId":{ + "shape":"KeyIdType", + "documentation":"

        The Amazon Resource Name (key ARN) of the KMS key into which key material was imported.

        " + }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the imported key material.

        " + } } }, + "ImportState":{ + "type":"string", + "enum":[ + "IMPORTED", + "PENDING_IMPORT" + ] + }, + "ImportType":{ + "type":"string", + "enum":[ + "NEW_KEY_MATERIAL", + "EXISTING_KEY_MATERIAL" + ] + }, + "IncludeKeyMaterial":{ + "type":"string", + "enum":[ + "ALL_KEY_MATERIAL", + "ROTATIONS_ONLY" + ] + }, "IncorrectKeyException":{ "type":"structure", "members":{ @@ -2527,7 +2615,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

        The request was rejected because the key material in the request is, expired, invalid, or is not the same key material that was previously imported into this KMS key.

        ", + "documentation":"

        The request was rejected because the key material in the request is expired, invalid, or does not meet expectations. For example, it is not the same key material that was previously imported or KMS expected new key material but the key material being imported is already associated with the KMS key.

        ", "exception":true }, "IncorrectTrustAnchorException":{ @@ -2677,6 +2765,20 @@ "CUSTOMER" ] }, + "KeyMaterialDescriptionType":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^[a-zA-Z0-9:/_\\s.-]+$" + }, + "KeyMaterialState":{ + "type":"string", + "enum":[ + "NON_CURRENT", + "CURRENT", + "PENDING_ROTATION" + ] + }, "KeyMetadata":{ "type":"structure", "required":["KeyId"], @@ -2707,7 +2809,7 @@ }, "KeyUsage":{ "shape":"KeyUsageType", - "documentation":"

        The cryptographic operations for which you can use the KMS key.

        " + "documentation":"

        The cryptographic operations for which you can use the KMS key.

        " }, "KeyState":{ "shape":"KeyState", @@ -2719,7 +2821,7 @@ }, "ValidTo":{ "shape":"DateType", - "documentation":"

        The time at which the imported key material expires. When the key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.

        " + "documentation":"

        The earliest time at which any imported key material permanently associated with this KMS key expires. When a key material expires, KMS deletes the key material and the KMS key becomes unusable. This value is present only for KMS keys whose Origin is EXTERNAL and the ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.

        " }, "Origin":{ "shape":"OriginType", @@ -2727,11 +2829,11 @@ }, "CustomKeyStoreId":{ "shape":"CustomKeyStoreIdType", - "documentation":"

        A unique identifier for the custom key store that contains the KMS key. This field is present only when the KMS key is created in a custom key store.

        " + "documentation":"

        A unique identifier for the custom key store that contains the KMS key. This field is present only when the KMS key is created in a custom key store.

        " }, "CloudHsmClusterId":{ "shape":"CloudHsmClusterIdType", - "documentation":"

        The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in an CloudHSM custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This field is present only when the KMS key is created in an CloudHSM key store.

        " + "documentation":"

        The cluster ID of the CloudHSM cluster that contains the key material for the KMS key. When you create a KMS key in an CloudHSM custom key store, KMS creates the key material for the KMS key in the associated CloudHSM cluster. This field is present only when the KMS key is created in an CloudHSM key store.

        " }, "ExpirationModel":{ "shape":"ExpirationModelType", @@ -2782,6 +2884,10 @@ "XksKeyConfiguration":{ "shape":"XksKeyConfigurationType", "documentation":"

        Information about the external key that is associated with a KMS key in an external key store.

        For more information, see External key in the Key Management Service Developer Guide.

        " + }, + "CurrentKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Identifies the current key material. This value is present for symmetric encryption keys with AWS_KMS origin and single-Region, symmetric encryption keys with EXTERNAL origin. These KMS keys support automatic or on-demand key rotation and can have multiple key materials associated with them. KMS uses the current key material for both encryption and decryption, and the non-current key material for decryption operations only.

        " } }, "documentation":"

        Contains metadata about a KMS key.

        This data type is used as a response element for the CreateKey, DescribeKey, and ReplicateKey operations.

        " @@ -2801,7 +2907,10 @@ "HMAC_256", "HMAC_384", "HMAC_512", - "SM2" + "SM2", + "ML_DSA_44", + "ML_DSA_65", + "ML_DSA_87" ] }, "KeyState":{ @@ -2846,7 +2955,7 @@ "members":{ "message":{"shape":"ErrorMessageType"} }, - "documentation":"

        The request was rejected because a quota was exceeded. For more information, see Quotas in the Key Management Service Developer Guide.

        ", + "documentation":"

        The request was rejected because a length constraint or quota was exceeded. For more information, see Quotas in the Key Management Service Developer Guide.

        ", "exception":true }, "LimitType":{ @@ -2974,6 +3083,10 @@ "shape":"KeyIdType", "documentation":"

        Gets the key rotations for the specified KMS key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " }, + "IncludeKeyMaterial":{ + "shape":"IncludeKeyMaterial", + "documentation":"

        Use this optional parameter to control which key materials associated with this key are listed in the response. The default value of this parameter is ROTATIONS_ONLY. If you omit this parameter, KMS returns information on the key materials created by automatic or on-demand key rotation. When you specify a value of ALL_KEY_MATERIAL, KMS adds the first key material and any imported key material pending rotation to the response. This parameter can only be used with KMS keys that support automatic or on-demand key rotation.

        " + }, "Limit":{ "shape":"LimitType", "documentation":"

        Use this parameter to specify the maximum number of items to return. When this value is present, KMS does not return more than the specified number of items, but it might return fewer.

        This value is optional. If you include a value, it must be between 1 and 1000, inclusive. If you do not include a value, it defaults to 100.

        " @@ -2989,7 +3102,7 @@ "members":{ "Rotations":{ "shape":"RotationsList", - "documentation":"

        A list of completed key material rotations.

        " + "documentation":"

        A list of completed key material rotations. When the optional input parameter IncludeKeyMaterial is specified with a value of ALL_KEY_MATERIAL, this list includes the first key material and any imported key material pending rotation.

        " }, "NextMarker":{ "shape":"MarkerType", @@ -3115,7 +3228,8 @@ "type":"string", "enum":[ "RAW", - "DIGEST" + "DIGEST", + "EXTERNAL_MU" ] }, "MultiRegionConfiguration":{ @@ -3239,7 +3353,7 @@ }, "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key.

        The key policy must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide.For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key.

        The key policy must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        If either of the required Resource or Action elements are missing from a key policy statement, the policy statement has no effect. When a key policy statement is missing one of these elements, the KMS console correctly reports an error, but the PutKeyPolicy API request succeeds, even though the policy statement is ineffective.

        For more information on required key policy elements, see Elements in a key policy in the Key Management Service Developer Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        If the key policy exceeds the length constraint, KMS returns a LimitExceededException.

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide.

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -3260,7 +3374,7 @@ }, "SourceEncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies the encryption context to use to decrypt the ciphertext. Enter the same encryption context that was used to encrypt the ciphertext.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use to decrypt the ciphertext. Enter the same encryption context that was used to encrypt the ciphertext.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "SourceKeyId":{ "shape":"KeyIdType", @@ -3272,7 +3386,7 @@ }, "DestinationEncryptionContext":{ "shape":"EncryptionContextType", - "documentation":"

        Specifies that encryption context to use when the reencrypting the data.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        A destination encryption context is valid only when the destination KMS key is a symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS keys does not include fields for metadata.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " + "documentation":"

        Specifies the encryption context to use when reencrypting the data.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        A destination encryption context is valid only when the destination KMS key is a symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS keys does not include fields for metadata.

        An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data. When you use an encryption context to encrypt data, you must specify the same (an exact case-sensitive match) encryption context to decrypt the data. An encryption context is supported only on operations with symmetric encryption KMS keys. On operations with symmetric encryption KMS keys, an encryption context is optional, but it is strongly recommended.

        For more information, see Encryption context in the Key Management Service Developer Guide.

        " }, "SourceEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", @@ -3284,11 +3398,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3314,6 +3428,14 @@ "DestinationEncryptionAlgorithm":{ "shape":"EncryptionAlgorithmSpec", "documentation":"

        The encryption algorithm that was used to reencrypt the data.

        " + }, + "SourceKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to originally encrypt the data. This field is present only when the original encryption used a symmetric encryption KMS key.

        " + }, + "DestinationKeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        The identifier of the key material used to reencrypt the data. This field is present only when data is reencrypted using a symmetric encryption KMS key.

        " } } }, @@ -3350,11 +3472,11 @@ }, "ReplicaRegion":{ "shape":"RegionType", - "documentation":"

        The Region ID of the Amazon Web Services Region for this replica key.

        Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web Services Regions in which KMS is supported, see KMS service endpoints in the Amazon Web Services General Reference.

        HMAC KMS keys are not supported in all Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an Amazon Web Services Region in which HMAC keys are not supported, the ReplicateKey operation returns an UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are supported, see HMAC keys in KMS in the Key Management Service Developer Guide.

        The replica must be in a different Amazon Web Services Region than its primary key and other replicas of that primary key, but in the same Amazon Web Services partition. KMS must be available in the replica Region. If the Region is not enabled by default, the Amazon Web Services account must be enabled in the Region. For information about Amazon Web Services partitions, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For information about enabling and disabling Regions, see Enabling a Region and Disabling a Region in the Amazon Web Services General Reference.

        " + "documentation":"

        The Region ID of the Amazon Web Services Region for this replica key.

        Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web Services Regions in which KMS is supported, see KMS service endpoints in the Amazon Web Services General Reference.

        The replica must be in a different Amazon Web Services Region than its primary key and other replicas of that primary key, but in the same Amazon Web Services partition. KMS must be available in the replica Region. If the Region is not enabled by default, the Amazon Web Services account must be enabled in the Region. For information about Amazon Web Services partitions, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference. For information about enabling and disabling Regions, see Enabling a Region and Disabling a Region in the Amazon Web Services General Reference.

        " }, "Policy":{ "shape":"PolicyType", - "documentation":"

        The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

        The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " + "documentation":"

        The key policy to attach to the KMS key. This parameter is optional. If you do not provide a key policy, KMS attaches the default key policy to the KMS key.

        The key policy is not a shared property of multi-Region keys. You can specify the same key policy or a different key policy for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        If you provide a key policy, it must meet the following criteria:

        • The key policy must allow the calling principal to make a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For more information, see Default key policy in the Key Management Service Developer Guide. (To omit this condition, set BypassPolicyLockoutSafetyCheck to true.)

        • Each statement in the key policy must contain one or more principals. The principals in the key policy must exist and be visible to KMS. When you create a new Amazon Web Services principal, you might need to enforce a delay before including the new principal in a key policy because the new principal might not be immediately visible to KMS. For more information, see Changes that I make are not always immediately visible in the Amazon Web Services Identity and Access Management User Guide.

        A key policy document can include only the following characters:

        • Printable ASCII characters from the space character (\\u0020) through the end of the ASCII character range.

        • Printable characters in the Basic Latin and Latin-1 Supplement character set (through \\u00FF).

        • The tab (\\u0009), line feed (\\u000A), and carriage return (\\u000D) special characters

        For information about key policies, see Key policies in KMS in the Key Management Service Developer Guide. For help writing and formatting a JSON policy document, see the IAM JSON Policy Reference in the Identity and Access Management User Guide .

        " }, "BypassPolicyLockoutSafetyCheck":{ "shape":"BooleanType", @@ -3366,7 +3488,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tagging Keys.

        " + "documentation":"

        Assigns one or more tags to the replica key. Use this parameter to tag the KMS key when it is created. To tag an existing KMS key, use the TagResource operation.

        Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output.

        Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see ABAC for KMS in the Key Management Service Developer Guide.

        To use this parameter, you must have kms:TagResource permission in an IAM policy.

        Tags are not a shared property of multi-Region keys. You can specify the same tags or different tags for each key in a set of related multi-Region keys. KMS does not synchronize this property.

        Each tag consists of a tag key and a tag value. Both the tag key and the tag value are required, but the tag value can be an empty (null) string. You cannot have more than one tag on a KMS key with the same tag key. If you specify an existing tag key with a different tag value, KMS replaces the current tag value with the specified one.

        When you add tags to an Amazon Web Services resource, Amazon Web Services generates a cost allocation report with usage and costs aggregated by tags. Tags can also be used to control access to a KMS key. For details, see Tags in KMS.

        " } } }, @@ -3404,7 +3526,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3425,7 +3547,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3435,7 +3557,7 @@ "members":{ "KeyId":{ "shape":"KeyIdType", - "documentation":"

        Identifies a symmetric encryption KMS key. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " + "documentation":"

        Identifies a symmetric encryption KMS key. You cannot perform on-demand rotation of asymmetric KMS keys, HMAC KMS keys, multi-Region KMS keys with imported key material, or KMS keys in a custom key store. To perform on-demand rotation of a set of related multi-Region keys, invoke the on-demand rotation on the primary key.

        Specify the key ID or key ARN of the KMS key.

        For example:

        • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab

        • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.

        " } } }, @@ -3471,16 +3593,40 @@ "shape":"KeyIdType", "documentation":"

        Unique identifier of the key.

        " }, + "KeyMaterialId":{ + "shape":"BackingKeyIdType", + "documentation":"

        Unique identifier of the key material.

        " + }, + "KeyMaterialDescription":{ + "shape":"KeyMaterialDescriptionType", + "documentation":"

        User-specified description of the key material. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "ImportState":{ + "shape":"ImportState", + "documentation":"

        Indicates if the key material is currently imported into KMS. It has two possible values: IMPORTED or PENDING_IMPORT. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "KeyMaterialState":{ + "shape":"KeyMaterialState", + "documentation":"

        There are three possible values for this field: CURRENT, NON_CURRENT and PENDING_ROTATION. KMS uses CURRENT key material for both encryption and decryption and NON_CURRENT key material only for decryption. PENDING_ROTATION identifies key material that has been imported for on-demand key rotation but the rotation hasn't completed. Key material in PENDING_ROTATION is not permanently associated with the KMS key. You can delete this key material and import different key material in its place. The PENDING_ROTATION value is only used in symmetric encryption keys with imported key material. The other values, CURRENT and NON_CURRENT, are used for all KMS keys that support automatic or on-demand key rotation.

        " + }, + "ExpirationModel":{ + "shape":"ExpirationModelType", + "documentation":"

        Indicates if the key material is configured to automatically expire. There are two possible values for this field: KEY_MATERIAL_EXPIRES and KEY_MATERIAL_DOES_NOT_EXPIRE. For any key material that expires, the expiration date and time is indicated in ValidTo. This field is only present for symmetric encryption KMS keys with EXTERNAL origin.

        " + }, + "ValidTo":{ + "shape":"DateType", + "documentation":"

        Date and time at which the key material expires. This field is only present for symmetric encryption KMS keys with EXTERNAL origin in rotation list entries with an ExpirationModel value of KEY_MATERIAL_EXPIRES.

        " + }, "RotationDate":{ "shape":"DateType", - "documentation":"

        Date and time that the key material rotation completed. Formatted as Unix time.

        " + "documentation":"

        Date and time that the key material rotation completed. Formatted as Unix time. This field is not present for the first key material or an imported key material in PENDING_ROTATION state.

        " }, "RotationType":{ "shape":"RotationType", - "documentation":"

        Identifies whether the key material rotation was a scheduled automatic rotation or an on-demand rotation.

        " + "documentation":"

        Identifies whether the key material rotation was a scheduled automatic rotation or an on-demand rotation. This field is not present for the first key material or an imported key material in PENDING_ROTATION state.

        " } }, - "documentation":"

        Contains information about completed key material rotations.

        " + "documentation":"

        Each entry contains information about one of the key materials associated with a KMS key.

        " }, "ScheduleKeyDeletionRequest":{ "type":"structure", @@ -3535,11 +3681,11 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm.

        Use the DIGEST value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the signing operation can be compromised.

        When the value of MessageTypeis DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, this can cause verification failures when verifying with a system that assumes a single hash.

        The hashing algorithm in that Sign uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " + "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed; use EXTERNAL_MU for 64-byte representative μ used in ML-DSA signing as defined in NIST FIPS 204 Section 6.2.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm. When the value is EXTERNAL_MU, KMS skips the concatenated hashing of the public key hash and the message done in the ML-DSA signing algorithm.

        Use the DIGEST or EXTERNAL_MU value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the signing operation can be compromised.

        When the value of MessageType is DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        When the value of MessageType is EXTERNAL_MU, the length of the Message value must be 64 bytes.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, this can cause verification failures when verifying with a system that assumes a single hash.

        The hashing algorithm that Sign uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • Signing algorithms that end in SHAKE_256 use the SHAKE_256 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "SigningAlgorithm":{ "shape":"SigningAlgorithmSpec", @@ -3547,7 +3693,7 @@ }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3580,7 +3726,8 @@ "ECDSA_SHA_256", "ECDSA_SHA_384", "ECDSA_SHA_512", - "SM2DSA" + "SM2DSA", + "ML_DSA_SHAKE_256" ] }, "SigningAlgorithmSpecList":{ @@ -3739,8 +3886,7 @@ }, "UpdateCustomKeyStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateKeyDescriptionRequest":{ "type":"structure", @@ -3803,11 +3949,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -3847,7 +3993,7 @@ }, "MessageType":{ "shape":"MessageType", - "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm.

        Use the DIGEST value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the verification operation can be compromised.

        When the value of MessageTypeis DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, if the signed message is hashed once while signing, but twice while verifying, verification fails, even when the message hasn't changed.

        The hashing algorithm in that Verify uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " + "documentation":"

        Tells KMS whether the value of the Message parameter should be hashed as part of the signing algorithm. Use RAW for unhashed messages; use DIGEST for message digests, which are already hashed; use EXTERNAL_MU for 64-byte representative μ used in ML-DSA signing as defined in NIST FIPS 204 Section 6.2.

        When the value of MessageType is RAW, KMS uses the standard signing algorithm, which begins with a hash function. When the value is DIGEST, KMS skips the hashing step in the signing algorithm. When the value is EXTERNAL_MU, KMS skips the concatenated hashing of the public key hash and the message done in the ML-DSA signing algorithm.

        Use the DIGEST or EXTERNAL_MU value only when the value of the Message parameter is a message digest. If you use the DIGEST value with an unhashed message, the security of the verification operation can be compromised.

        When the value of MessageType is DIGEST, the length of the Message value must match the length of hashed messages for the specified signing algorithm.

        When the value of MessageType is EXTERNAL_MU, the length of the Message value must be 64 bytes.

        You can submit a message digest and omit the MessageType or specify RAW so the digest is hashed again while signing. However, if the signed message is hashed once while signing, but twice while verifying, verification fails, even when the message hasn't changed.

        The hashing algorithm that Verify uses is based on the SigningAlgorithm value.

        • Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.

        • Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.

        • Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.

        • Signing algorithms that end in SHAKE_256 use the SHAKE_256 hashing algorithm.

        • SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification with SM2 key pairs.

        " }, "Signature":{ "shape":"CiphertextType", @@ -3859,11 +4005,11 @@ }, "GrantTokens":{ "shape":"GrantTokenList", - "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " + "documentation":"

        A list of grant tokens.

        Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide.

        " }, "DryRun":{ "shape":"NullableBooleanType", - "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

        " + "documentation":"

        Checks if your request will succeed. DryRun is an optional parameter.

        To learn more about how to use this parameter, see Testing your permissions in the Key Management Service Developer Guide.

        " } } }, @@ -4089,5 +4235,5 @@ "exception":true } }, - "documentation":"Key Management Service

        Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

        KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

        We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

        If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use the FIPS endpoint in your preferred Amazon Web Services Region. For more information about the available FIPS endpoints, see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference.

        All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

        Signing Requests

        Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account root access key ID and secret access key for everyday work. You can use the access key ID and secret access key for an IAM user or you can use the Security Token Service (STS) to generate temporary security credentials and use those to sign requests.

        All KMS requests must be signed with Signature Version 4.

        Logging API Requests

        KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

        Additional Resources

        For more information about credentials and request signing, see the following:

        Commonly Used API Operations

        Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

        " + "documentation":"Key Management Service

        Key Management Service (KMS) is an encryption and key management web service. This guide describes the KMS operations that you can call programmatically. For general information about KMS, see the Key Management Service Developer Guide .

        KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

        Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Rust, Python, Ruby, .Net, macOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to KMS and other Amazon Web Services services. For example, the SDKs take care of tasks such as signing requests (see below), managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services.

        We recommend that you use the Amazon Web Services SDKs to make programmatic API calls to KMS.

        If you need to use FIPS 140-2 validated cryptographic modules when communicating with Amazon Web Services, use one of the FIPS endpoints in your preferred Amazon Web Services Region. If you need communicate over IPv6, use the dual-stack endpoint in your preferred Amazon Web Services Region. For more information see Service endpoints in the Key Management Service topic of the Amazon Web Services General Reference and Dual-stack endpoint support in the KMS Developer Guide.

        All KMS API calls must be signed and be transmitted using Transport Layer Security (TLS). KMS recommends you always use the latest supported TLS version. Clients must also support cipher suites with Perfect Forward Secrecy (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support these modes.

        Signing Requests

        Requests must be signed using an access key ID and a secret access key. We strongly recommend that you do not use your Amazon Web Services account root access key ID and secret access key for everyday work. You can use the access key ID and secret access key for an IAM user or you can use the Security Token Service (STS) to generate temporary security credentials and use those to sign requests.

        All KMS requests must be signed with Signature Version 4.

        Logging API Requests

        KMS supports CloudTrail, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket that you specify. By using the information collected by CloudTrail, you can determine what requests were made to KMS, who made the request, when it was made, and so on. To learn more about CloudTrail, including how to turn it on and find your log files, see the CloudTrail User Guide.

        Additional Resources

        For more information about credentials and request signing, see the following:

        Commonly Used API Operations

        Of the API operations discussed in this guide, the following will prove the most useful for most applications. You will likely perform operations other than these, such as creating keys and assigning policies, by using the console.

        " } diff --git a/services/lakeformation/pom.xml b/services/lakeformation/pom.xml index c0375cde2d60..409f0db52886 100644 --- a/services/lakeformation/pom.xml +++ b/services/lakeformation/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lakeformation AWS Java SDK :: Services :: LakeFormation diff --git a/services/lakeformation/src/main/resources/codegen-resources/customization.config b/services/lakeformation/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/lakeformation/src/main/resources/codegen-resources/customization.config +++ b/services/lakeformation/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lambda/pom.xml b/services/lambda/pom.xml index 1e2e7539e382..64b5a21a517e 100644 --- a/services/lambda/pom.xml +++ b/services/lambda/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lambda AWS Java SDK :: Services :: AWS Lambda diff --git a/services/lambda/src/main/resources/codegen-resources/customization.config b/services/lambda/src/main/resources/codegen-resources/customization.config index d9800630318d..952021ded000 100644 --- a/services/lambda/src/main/resources/codegen-resources/customization.config +++ b/services/lambda/src/main/resources/codegen-resources/customization.config @@ -8,6 +8,5 @@ "deprecatedOperations": [ "InvokeAsync" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lambda/src/main/resources/codegen-resources/service-2.json b/services/lambda/src/main/resources/codegen-resources/service-2.json index 8b5f3b7ff326..a3ab7528ed28 100644 --- 
a/services/lambda/src/main/resources/codegen-resources/service-2.json +++ b/services/lambda/src/main/resources/codegen-resources/service-2.json @@ -101,7 +101,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

        Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

        For details about how to configure different event sources, see the following topics.

        The following error handling options are available only for DynamoDB and Kinesis event sources:

        • BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.

        • MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

        • MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

        • ParallelizationFactor – Process multiple batches from each shard concurrently.

        For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

        • DestinationConfig – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket.

        For information about which configuration parameters apply to each event source, see the following topics.

        " + "documentation":"

        Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

        For details about how to configure different event sources, see the following topics.

        The following error handling options are available only for DynamoDB and Kinesis event sources:

        • BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.

        • MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

        • MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

        • ParallelizationFactor – Process multiple batches from each shard concurrently.

        For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

        • OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.

        For information about which configuration parameters apply to each event source, see the following topics.

        " }, "CreateFunction":{ "name":"CreateFunction", @@ -1166,7 +1166,7 @@ {"shape":"ResourceConflictException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

        Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

        For details about how to configure different event sources, see the following topics.

        The following error handling options are available only for DynamoDB and Kinesis event sources:

        • BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.

        • MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

        • MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

        • ParallelizationFactor – Process multiple batches from each shard concurrently.

        For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

        • DestinationConfig – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket.

        For information about which configuration parameters apply to each event source, see the following topics.

        " + "documentation":"

        Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

        For details about how to configure different event sources, see the following topics.

        The following error handling options are available only for DynamoDB and Kinesis event sources:

        • BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.

        • MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

        • MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

        • ParallelizationFactor – Process multiple batches from each shard concurrently.

        For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

        • OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.

        For information about which configuration parameters apply to each event source, see the following topics.

        " }, "UpdateFunctionCode":{ "name":"UpdateFunctionCode", @@ -1511,6 +1511,10 @@ "ConsumerGroupId":{ "shape":"URI", "documentation":"

        The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

        " + }, + "SchemaRegistryConfig":{ + "shape":"KafkaSchemaRegistryConfig", + "documentation":"

        Specific configuration settings for a Kafka schema registry.

        " } }, "documentation":"

        Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

        " @@ -2080,7 +2084,7 @@ "documentation":"

        The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.

        " } }, - "documentation":"

        The dead-letter queue for failed asynchronous invocations.

        " + "documentation":"

        The dead-letter queue for failed asynchronous invocations.

        " }, "DeleteAliasRequest":{ "type":"structure", @@ -2117,8 +2121,7 @@ }, "DeleteCodeSigningConfigResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteEventSourceMappingRequest":{ "type":"structure", @@ -2268,14 +2271,14 @@ "members":{ "OnSuccess":{ "shape":"OnSuccess", - "documentation":"

        The destination configuration for successful invocations.

        " + "documentation":"

        The destination configuration for successful invocations. Not supported in CreateEventSourceMapping or UpdateEventSourceMapping.

        " }, "OnFailure":{ "shape":"OnFailure", "documentation":"

        The destination configuration for failed invocations.

        " } }, - "documentation":"

        A configuration object that specifies the destination of an event after Lambda processes it.

        " + "documentation":"

        A configuration object that specifies the destination of an event after Lambda processes it. For more information, see Adding a destination.

        " }, "DocumentDBEventSourceConfig":{ "type":"structure", @@ -2520,7 +2523,7 @@ }, "LastProcessingResult":{ "shape":"String", - "documentation":"

        The result of the last Lambda invocation of your function.

        " + "documentation":"

        The result of the event source mapping's last processing attempt.

        " }, "State":{ "shape":"String", @@ -3064,8 +3067,7 @@ }, "GetAccountSettingsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAccountSettingsResponse":{ "type":"structure", @@ -3992,6 +3994,75 @@ "error":{"httpStatusCode":502}, "exception":true }, + "KafkaSchemaRegistryAccessConfig":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"KafkaSchemaRegistryAuthType", + "documentation":"

        The type of authentication Lambda uses to access your schema registry.

        " + }, + "URI":{ + "shape":"Arn", + "documentation":"

        The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.

        " + } + }, + "documentation":"

        Specific access configuration settings that tell Lambda how to authenticate with your schema registry.

        If you're working with an Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.

        If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the Secrets Manager secret ARN in the URI field.

        " + }, + "KafkaSchemaRegistryAccessConfigList":{ + "type":"list", + "member":{"shape":"KafkaSchemaRegistryAccessConfig"} + }, + "KafkaSchemaRegistryAuthType":{ + "type":"string", + "enum":[ + "BASIC_AUTH", + "CLIENT_CERTIFICATE_TLS_AUTH", + "SERVER_ROOT_CA_CERTIFICATE" + ] + }, + "KafkaSchemaRegistryConfig":{ + "type":"structure", + "members":{ + "SchemaRegistryURI":{ + "shape":"SchemaRegistryUri", + "documentation":"

        The URI for your schema registry. The correct URI format depends on the type of schema registry you're using.

        • For Glue schema registries, use the ARN of the registry.

        • For Confluent schema registries, use the URL of the registry.

        " + }, + "EventRecordFormat":{ + "shape":"SchemaRegistryEventRecordFormat", + "documentation":"

        The record format that Lambda delivers to your function after schema validation.

        • Choose JSON to have Lambda deliver the record to your function as a standard JSON object.

        • Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function.

        " + }, + "AccessConfigs":{ + "shape":"KafkaSchemaRegistryAccessConfigList", + "documentation":"

        An array of access configuration objects that tell Lambda how to authenticate with your schema registry.

        " + }, + "SchemaValidationConfigs":{ + "shape":"KafkaSchemaValidationConfigList", + "documentation":"

        An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.

        " + } + }, + "documentation":"

        Specific configuration settings for a Kafka schema registry.

        " + }, + "KafkaSchemaValidationAttribute":{ + "type":"string", + "enum":[ + "KEY", + "VALUE" + ] + }, + "KafkaSchemaValidationConfig":{ + "type":"structure", + "members":{ + "Attribute":{ + "shape":"KafkaSchemaValidationAttribute", + "documentation":"

        The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.

        " + } + }, + "documentation":"

        Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.

        " + }, + "KafkaSchemaValidationConfigList":{ + "type":"list", + "member":{"shape":"KafkaSchemaValidationConfig"} + }, "LastUpdateStatus":{ "type":"string", "enum":[ @@ -4814,7 +4885,7 @@ "documentation":"

        The Amazon Resource Name (ARN) of the destination resource.

        To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.

        To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

        " } }, - "documentation":"

        A destination for events that failed processing.

        " + "documentation":"

        A destination for events that failed processing. For more information, see Adding a destination.

        " }, "OnSuccess":{ "type":"structure", @@ -4824,7 +4895,7 @@ "documentation":"

        The Amazon Resource Name (ARN) of the destination resource.

        " } }, - "documentation":"

        A destination for events that were processed successfully.

        To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

        " + "documentation":"

        A destination for events that were processed successfully.

        To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

        OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.

        " }, "OrganizationId":{ "type":"string", @@ -5578,6 +5649,19 @@ }, "documentation":"

        (Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.

        " }, + "SchemaRegistryEventRecordFormat":{ + "type":"string", + "enum":[ + "JSON", + "SOURCE" + ] + }, + "SchemaRegistryUri":{ + "type":"string", + "max":10000, + "min":1, + "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" + }, "SecurityGroupId":{"type":"string"}, "SecurityGroupIds":{ "type":"list", @@ -5599,7 +5683,11 @@ "members":{ "ConsumerGroupId":{ "shape":"URI", - "documentation":"

        The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

        " + "documentation":"

        The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

        " + }, + "SchemaRegistryConfig":{ + "shape":"KafkaSchemaRegistryConfig", + "documentation":"

        Specific configuration settings for a Kafka schema registry.

        " } }, "documentation":"

        Specific configuration settings for a self-managed Apache Kafka event source.

        " @@ -6132,6 +6220,8 @@ "shape":"ScalingConfig", "documentation":"

        (Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

        " }, + "AmazonManagedKafkaEventSourceConfig":{"shape":"AmazonManagedKafkaEventSourceConfig"}, + "SelfManagedKafkaEventSourceConfig":{"shape":"SelfManagedKafkaEventSourceConfig"}, "DocumentDBEventSourceConfig":{ "shape":"DocumentDBEventSourceConfig", "documentation":"

        Specific configuration settings for a DocumentDB event source.

        " diff --git a/services/launchwizard/pom.xml b/services/launchwizard/pom.xml index b891df0c774a..db2ad6f71883 100644 --- a/services/launchwizard/pom.xml +++ b/services/launchwizard/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT launchwizard AWS Java SDK :: Services :: Launch Wizard diff --git a/services/launchwizard/src/main/resources/codegen-resources/customization.config b/services/launchwizard/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/launchwizard/src/main/resources/codegen-resources/customization.config +++ b/services/launchwizard/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lexmodelbuilding/pom.xml b/services/lexmodelbuilding/pom.xml index b696f29e92e4..1d04d6266c51 100644 --- a/services/lexmodelbuilding/pom.xml +++ b/services/lexmodelbuilding/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lexmodelbuilding AWS Java SDK :: Services :: Amazon Lex Model Building diff --git a/services/lexmodelbuilding/src/main/resources/codegen-resources/customization.config b/services/lexmodelbuilding/src/main/resources/codegen-resources/customization.config index edfdc5a23c68..dd5c901b0edb 100644 --- a/services/lexmodelbuilding/src/main/resources/codegen-resources/customization.config +++ b/services/lexmodelbuilding/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "getIntents", "getSlotTypes" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lexmodelsv2/pom.xml b/services/lexmodelsv2/pom.xml index 11a6a1fcd590..b7a440772a69 100644 --- a/services/lexmodelsv2/pom.xml +++ b/services/lexmodelsv2/pom.xml @@ 
-21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lexmodelsv2 AWS Java SDK :: Services :: Lex Models V2 diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/customization.config b/services/lexmodelsv2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/customization.config +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json index 871fecce8049..2b3c3e2babb6 100644 --- a/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json +++ b/services/lexmodelsv2/src/main/resources/codegen-resources/service-2.json @@ -5737,8 +5737,7 @@ }, "CreateUploadUrlRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateUploadUrlResponse":{ "type":"structure", @@ -6494,8 +6493,7 @@ }, "DeleteUtterancesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeBotAliasRequest":{ "type":"structure", @@ -7772,7 +7770,7 @@ }, "Description":{ "type":"string", - "max":200, + "max":2000, "min":0 }, "DescriptiveBotBuilderSpecification":{ @@ -10880,6 +10878,17 @@ "min":0 }, "NextToken":{"type":"string"}, + "NluImprovementSpecification":{ + "type":"structure", + "required":["enabled"], + "members":{ + "enabled":{ + "shape":"Enabled", + "documentation":"

        Specifies whether the assisted nlu feature is enabled.

        " + } + }, + "documentation":"

        Specifies whether the assisted nlu feature is turned on or off.

        " + }, "NonEmptyString":{ "type":"string", "min":1 @@ -11501,6 +11510,10 @@ "slotResolutionImprovement":{ "shape":"SlotResolutionImprovementSpecification", "documentation":"

        An object containing specifications for the assisted slot resolution feature.

        " + }, + "nluImprovement":{ + "shape":"NluImprovementSpecification", + "documentation":"

        An object containing specifications for the assisted nlu feature.

        " } }, "documentation":"

        Contains specifications about the Amazon Lex runtime generative AI capabilities from Amazon Bedrock that you can turn on for your bot.

        " @@ -12942,8 +12955,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -13650,8 +13662,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateBotAliasRequest":{ "type":"structure", diff --git a/services/lexruntime/pom.xml b/services/lexruntime/pom.xml index bcd8d2884dc2..67916772ae11 100644 --- a/services/lexruntime/pom.xml +++ b/services/lexruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lexruntime AWS Java SDK :: Services :: Amazon Lex Runtime diff --git a/services/lexruntime/src/main/resources/codegen-resources/customization.config b/services/lexruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/lexruntime/src/main/resources/codegen-resources/customization.config +++ b/services/lexruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lexruntimev2/pom.xml b/services/lexruntimev2/pom.xml index 796c588369ad..a3e2855d04f0 100644 --- a/services/lexruntimev2/pom.xml +++ b/services/lexruntimev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lexruntimev2 AWS Java SDK :: Services :: Lex Runtime V2 diff --git a/services/lexruntimev2/src/main/resources/codegen-resources/customization.config b/services/lexruntimev2/src/main/resources/codegen-resources/customization.config index 6a46da376f05..a10b6cbcf23a 100644 --- a/services/lexruntimev2/src/main/resources/codegen-resources/customization.config +++ b/services/lexruntimev2/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "contentType": "application/json" }, "enableGenerateCompiledEndpointRules": true, - "usePriorKnowledgeForH2": 
true, - "enableFastUnmarshaller": true + "usePriorKnowledgeForH2": true } diff --git a/services/licensemanager/pom.xml b/services/licensemanager/pom.xml index b832e83cef95..6c1b44af6d0a 100644 --- a/services/licensemanager/pom.xml +++ b/services/licensemanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT licensemanager AWS Java SDK :: Services :: License Manager diff --git a/services/licensemanager/src/main/resources/codegen-resources/customization.config b/services/licensemanager/src/main/resources/codegen-resources/customization.config index 843ed5e6ee63..d8c559db863a 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanager/src/main/resources/codegen-resources/customization.config @@ -8,6 +8,5 @@ "listLicenseConfigurations", "listResourceInventory" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/licensemanager/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/licensemanager/src/main/resources/codegen-resources/endpoint-rule-set.json index c5bb192c75f5..ae6c873707c9 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/endpoint-rule-set.json +++ b/services/licensemanager/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], 
"error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/licensemanager/src/main/resources/codegen-resources/paginators-1.json b/services/licensemanager/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/paginators-1.json +++ b/services/licensemanager/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/licensemanager/src/main/resources/codegen-resources/service-2.json b/services/licensemanager/src/main/resources/codegen-resources/service-2.json index 9134561f8e70..8d05fd2c488a 100644 --- a/services/licensemanager/src/main/resources/codegen-resources/service-2.json +++ b/services/licensemanager/src/main/resources/codegen-resources/service-2.json @@ -5,11 +5,13 @@ 
"endpointPrefix":"license-manager", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS License Manager", "serviceId":"License Manager", "signatureVersion":"v4", "targetPrefix":"AWSLicenseManager", - "uid":"license-manager-2018-08-01" + "uid":"license-manager-2018-08-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptGrant":{ @@ -776,7 +778,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

        Lists the tags for the specified license configuration.

        " + "documentation":"

        Lists the tags for the specified resource. For more information about tagging support in License Manager, see the TagResource operation.

        " }, "ListTokens":{ "name":"ListTokens", @@ -847,7 +849,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

        Adds the specified tags to the specified license configuration.

        " + "documentation":"

        Adds the specified tags to the specified resource. The following resources support tagging in License Manager:

        • Licenses

        • Grants

        • License configurations

        • Report generators

        " }, "UntagResource":{ "name":"UntagResource", @@ -864,7 +866,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"} ], - "documentation":"

        Removes the specified tags from the specified license configuration.

        " + "documentation":"

        Removes the specified tags from the specified resource.

        " }, "UpdateLicenseConfiguration":{ "name":"UpdateLicenseConfiguration", @@ -880,7 +882,8 @@ {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, {"shape":"RateLimitExceededException"}, - {"shape":"ResourceLimitExceededException"} + {"shape":"ResourceLimitExceededException"}, + {"shape":"ConflictException"} ], "documentation":"

        Modifies the attributes of an existing license configuration.

        " }, @@ -919,7 +922,8 @@ {"shape":"ServerInternalException"}, {"shape":"AuthorizationException"}, {"shape":"AccessDeniedException"}, - {"shape":"RateLimitExceededException"} + {"shape":"RateLimitExceededException"}, + {"shape":"ConflictException"} ], "documentation":"

        Adds or removes the specified license configurations for the specified Amazon Web Services resource.

        You can update the license specifications of AMIs, instances, and hosts. You cannot update the license specifications for launch templates and CloudFormation templates, as they send license configurations to the operation that creates the resource.

        " }, @@ -999,13 +1003,13 @@ "AllowedOperationList":{ "type":"list", "member":{"shape":"AllowedOperation"}, - "max":7, + "max":8, "min":1 }, "Arn":{ "type":"string", "max":2048, - "pattern":"^arn:aws(-(cn|us-gov|iso-b|iso-c|iso-d))?:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + "pattern":"^arn:aws[a-zA-Z-]*:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" }, "ArnList":{ "type":"list", @@ -1067,8 +1071,7 @@ }, "CheckInLicenseResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CheckoutBorrowLicenseRequest":{ "type":"structure", @@ -1314,6 +1317,10 @@ "AllowedOperations":{ "shape":"AllowedOperationList", "documentation":"

        Allowed operations for the grant.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        Tags to add to the grant. For more information about tagging support in License Manager, see the TagResource operation.

        " } } }, @@ -1421,7 +1428,7 @@ }, "LicenseRules":{ "shape":"StringList", - "documentation":"

        License rules. The syntax is #name=value (for example, #allowedTenancy=EC2-DedicatedHost). The available rules vary by dimension, as follows.

        • Cores dimension: allowedTenancy | licenseAffinityToHost | maximumCores | minimumCores

        • Instances dimension: allowedTenancy | maximumCores | minimumCores | maximumSockets | minimumSockets | maximumVcpus | minimumVcpus

        • Sockets dimension: allowedTenancy | licenseAffinityToHost | maximumSockets | minimumSockets

        • vCPUs dimension: allowedTenancy | honorVcpuOptimization | maximumVcpus | minimumVcpus

        The unit for licenseAffinityToHost is days and the range is 1 to 180. The possible values for allowedTenancy are EC2-Default, EC2-DedicatedHost, and EC2-DedicatedInstance. The possible values for honorVcpuOptimization are True and False.

        " + "documentation":"

        License rules. The syntax is #name=value (for example, #allowedTenancy=EC2-DedicatedHost). The available rules vary by dimension, as follows.

        • Cores dimension: allowedTenancy | licenseAffinityToHost | maximumCores | minimumCores

        • Instances dimension: allowedTenancy | maximumVcpus | minimumVcpus

        • Sockets dimension: allowedTenancy | licenseAffinityToHost | maximumSockets | minimumSockets

        • vCPUs dimension: allowedTenancy | honorVcpuOptimization | maximumVcpus | minimumVcpus

        The unit for licenseAffinityToHost is days and the range is 1 to 180. The possible values for allowedTenancy are EC2-Default, EC2-DedicatedHost, and EC2-DedicatedInstance. The possible values for honorVcpuOptimization are True and False.

        " }, "Tags":{ "shape":"TagList", @@ -1584,6 +1591,10 @@ "ClientToken":{ "shape":"ClientToken", "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        Tags to add to the license. For more information about tagging support in License Manager, see the TagResource operation.

        " } } }, @@ -1798,8 +1809,7 @@ }, "DeleteLicenseConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLicenseManagerReportGeneratorRequest":{ "type":"structure", @@ -1813,8 +1823,7 @@ }, "DeleteLicenseManagerReportGeneratorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLicenseRequest":{ "type":"structure", @@ -1858,8 +1867,7 @@ }, "DeleteTokenResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DigitalSignatureMethod":{ "type":"string", @@ -2347,8 +2355,7 @@ }, "GetServiceSettingsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetServiceSettingsResponse":{ "type":"structure", @@ -2828,6 +2835,10 @@ "UsageOperation":{ "shape":"UsageOperation", "documentation":"

        The Usage operation value that corresponds to the license type you are converting your resource from. For more information about which platforms correspond to which usage operation values see Sample data: usage operation by platform

        " + }, + "ProductCodes":{ + "shape":"ProductCodeList", + "documentation":"

        Product codes referred to in the license conversion process.

        " } }, "documentation":"

        Information about a license type conversion task.

        " @@ -3115,7 +3126,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

        Filters to scope the results. The following filters and logical operators are supported:

        • licenseCountingType - The dimension for which licenses are counted. Possible values are vCPU | Instance | Core | Socket. Logical operators are EQUALS | NOT_EQUALS.

        • enforceLicenseCount - A Boolean value that indicates whether hard license enforcement is used. Logical operators are EQUALS | NOT_EQUALS.

        • usagelimitExceeded - A Boolean value that indicates whether the available licenses have been exceeded. Logical operators are EQUALS | NOT_EQUALS.

        " + "documentation":"

        Filters to scope the results. The following filters and logical operators are supported:

        • licenseCountingType - The dimension for which licenses are counted. Possible values are vCPU | Instance | Core | Socket.

        • enforceLicenseCount - A Boolean value that indicates whether hard license enforcement is used.

        • usagelimitExceeded - A Boolean value that indicates whether the available licenses have been exceeded.

        " } } }, @@ -3457,7 +3468,7 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

        Amazon Resource Name (ARN) of the license configuration.

        " + "documentation":"

        Amazon Resource Name (ARN) of the resource.

        " } } }, @@ -3522,7 +3533,7 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

        Filters to scope the results. The following filters and logical operators are supported:

        • resourceArn - The ARN of the license configuration resource. Logical operators are EQUALS | NOT_EQUALS.

        • resourceType - The resource type (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE). Logical operators are EQUALS | NOT_EQUALS.

        • resourceAccount - The ID of the account that owns the resource. Logical operators are EQUALS | NOT_EQUALS.

        " + "documentation":"

        Filters to scope the results. The following filters and logical operators are supported:

        • resourceArn - The ARN of the license configuration resource.

        • resourceType - The resource type (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE).

        • resourceAccount - The ID of the account that owns the resource.

        " } } }, @@ -3623,6 +3634,36 @@ "max":1, "min":1 }, + "ProductCodeId":{ + "type":"string", + "pattern":"^[A-Za-z0-9]{1,25}$" + }, + "ProductCodeList":{ + "type":"list", + "member":{"shape":"ProductCodeListItem"} + }, + "ProductCodeListItem":{ + "type":"structure", + "required":[ + "ProductCodeId", + "ProductCodeType" + ], + "members":{ + "ProductCodeId":{ + "shape":"ProductCodeId", + "documentation":"

        The product code ID

        " + }, + "ProductCodeType":{ + "shape":"ProductCodeType", + "documentation":"

        The product code type

        " + } + }, + "documentation":"

        A list item that contains a product code.

        " + }, + "ProductCodeType":{ + "type":"string", + "enum":["marketplace"] + }, "ProductInformation":{ "type":"structure", "required":[ @@ -3636,7 +3677,7 @@ }, "ProductInformationFilterList":{ "shape":"ProductInformationFilterList", - "documentation":"

        A Product information filter consists of a ProductInformationFilterComparator which is a logical operator, a ProductInformationFilterName which specifies the type of filter being declared, and a ProductInformationFilterValue that specifies the value to filter on.

        Accepted values for ProductInformationFilterName are listed here along with descriptions and valid options for ProductInformationFilterComparator.

        The following filters and are supported when the resource type is SSM_MANAGED:

        • Application Name - The name of the application. Logical operator is EQUALS.

        • Application Publisher - The publisher of the application. Logical operator is EQUALS.

        • Application Version - The version of the application. Logical operator is EQUALS.

        • Platform Name - The name of the platform. Logical operator is EQUALS.

        • Platform Type - The platform type. Logical operator is EQUALS.

        • Tag:key - The key of a tag attached to an Amazon Web Services resource you wish to exclude from automated discovery. Logical operator is NOT_EQUALS. The key for your tag must be appended to Tag: following the example: Tag:name-of-your-key. ProductInformationFilterValue is optional if you are not using values for the key.

        • AccountId - The 12-digit ID of an Amazon Web Services account you wish to exclude from automated discovery. Logical operator is NOT_EQUALS.

        • License Included - The type of license included. Logical operators are EQUALS and NOT_EQUALS. Possible values are: sql-server-enterprise | sql-server-standard | sql-server-web | windows-server-datacenter.

        The following filters and logical operators are supported when the resource type is RDS:

        • Engine Edition - The edition of the database engine. Logical operator is EQUALS. Possible values are: oracle-ee | oracle-se | oracle-se1 | oracle-se2.

        • License Pack - The license pack. Logical operator is EQUALS. Possible values are: data guard | diagnostic pack sqlt | tuning pack sqlt | ols | olap.

        " + "documentation":"

        A Product information filter consists of a ProductInformationFilterComparator which is a logical operator, a ProductInformationFilterName which specifies the type of filter being declared, and a ProductInformationFilterValue that specifies the value to filter on.

        Accepted values for ProductInformationFilterName are listed here along with descriptions and valid options for ProductInformationFilterComparator.

        The following filters and are supported when the resource type is SSM_MANAGED:

        • Application Name - The name of the application. Logical operator is EQUALS.

        • Application Publisher - The publisher of the application. Logical operator is EQUALS.

        • Application Version - The version of the application. Logical operator is EQUALS.

        • Platform Name - The name of the platform. Logical operator is EQUALS.

        • Platform Type - The platform type. Logical operator is EQUALS.

        • Tag:key - The key of a tag attached to an Amazon Web Services resource you wish to exclude from automated discovery. Logical operator is NOT_EQUALS. The key for your tag must be appended to Tag: following the example: Tag:name-of-your-key. ProductInformationFilterValue is optional if you are not using values for the key.

        • AccountId - The 12-digit ID of an Amazon Web Services account you wish to exclude from automated discovery. Logical operator is NOT_EQUALS.

        • License Included - The type of license included. Logical operators are EQUALS and NOT_EQUALS. Possible values are: sql-server-enterprise | sql-server-standard | sql-server-web | windows-server-datacenter.

        The following filters and logical operators are supported when the resource type is RDS:

        • Engine Edition - The edition of the database engine. Logical operator is EQUALS. Possible values are: oracle-ee | oracle-se | oracle-se1 | oracle-se2 | db2-se | db2-ae.

        • License Pack - The license pack. Logical operator is EQUALS. Possible values are: data guard | diagnostic pack sqlt | tuning pack sqlt | ols | olap.

        " } }, "documentation":"

        Describes product information for a license configuration.

        " @@ -3978,14 +4019,14 @@ "members":{ "Key":{ "shape":"String", - "documentation":"

        Tag key.

        " + "documentation":"

        The tag key.

        " }, "Value":{ "shape":"String", - "documentation":"

        Tag value.

        " + "documentation":"

        The tag value.

        " } }, - "documentation":"

        Details about a tag for a license configuration.

        " + "documentation":"

        Details about the tags for a resource. For more information about tagging support in License Manager, see the TagResource operation.

        " }, "TagKeyList":{ "type":"list", @@ -4004,7 +4045,7 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

        Amazon Resource Name (ARN) of the license configuration.

        " + "documentation":"

        Amazon Resource Name (ARN) of the resource. The following examples provide an example ARN for each supported resource in License Manager:

        • Licenses - arn:aws:license-manager::111122223333:license:l-EXAMPLE2da7646d6861033667f20e895

        • Grants - arn:aws:license-manager::111122223333:grant:g-EXAMPLE7b19f4a0ab73679b0beb52707

        • License configurations - arn:aws:license-manager:us-east-1:111122223333:license-configuration:lic-EXAMPLE6a788d4c8acd4264ff0ecf2ed2d

        • Report generators - arn:aws:license-manager:us-east-1:111122223333:report-generator:r-EXAMPLE825b4a4f8fe5a3e0c88824e5fc6

        " }, "Tags":{ "shape":"TagList", @@ -4014,8 +4055,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TokenData":{ "type":"structure", @@ -4081,7 +4121,7 @@ "members":{ "ResourceArn":{ "shape":"String", - "documentation":"

        Amazon Resource Name (ARN) of the license configuration.

        " + "documentation":"

        Amazon Resource Name (ARN) of the resource.

        " }, "TagKeys":{ "shape":"TagKeyList", @@ -4091,8 +4131,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLicenseConfigurationRequest":{ "type":"structure", @@ -4138,8 +4177,7 @@ }, "UpdateLicenseConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLicenseManagerReportGeneratorRequest":{ "type":"structure", @@ -4184,8 +4222,7 @@ }, "UpdateLicenseManagerReportGeneratorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateLicenseSpecificationsForResourceRequest":{ "type":"structure", @@ -4207,8 +4244,7 @@ }, "UpdateLicenseSpecificationsForResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateServiceSettingsRequest":{ "type":"structure", @@ -4233,8 +4269,7 @@ }, "UpdateServiceSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UsageOperation":{ "type":"string", diff --git a/services/licensemanagerlinuxsubscriptions/pom.xml b/services/licensemanagerlinuxsubscriptions/pom.xml index 51d2c64ba2f3..2e4a461d9d97 100644 --- a/services/licensemanagerlinuxsubscriptions/pom.xml +++ b/services/licensemanagerlinuxsubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT licensemanagerlinuxsubscriptions AWS Java SDK :: Services :: License Manager Linux Subscriptions diff --git a/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config b/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanagerlinuxsubscriptions/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } 
diff --git a/services/licensemanagerusersubscriptions/pom.xml b/services/licensemanagerusersubscriptions/pom.xml index 54897c773e22..0c71aa8ceda6 100644 --- a/services/licensemanagerusersubscriptions/pom.xml +++ b/services/licensemanagerusersubscriptions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT licensemanagerusersubscriptions AWS Java SDK :: Services :: License Manager User Subscriptions diff --git a/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config b/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config +++ b/services/licensemanagerusersubscriptions/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lightsail/pom.xml b/services/lightsail/pom.xml index d8cec0f4d7c7..60a8f0908e77 100644 --- a/services/lightsail/pom.xml +++ b/services/lightsail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lightsail AWS Java SDK :: Services :: Amazon Lightsail diff --git a/services/lightsail/src/main/resources/codegen-resources/customization.config b/services/lightsail/src/main/resources/codegen-resources/customization.config index 00fdc2652b12..229655a6c433 100644 --- a/services/lightsail/src/main/resources/codegen-resources/customization.config +++ b/services/lightsail/src/main/resources/codegen-resources/customization.config @@ -24,6 +24,5 @@ "getRelationalDatabases", "getStaticIps" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/location/pom.xml 
b/services/location/pom.xml index dadf62bf79aa..fb373eea5f5b 100644 --- a/services/location/pom.xml +++ b/services/location/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT location AWS Java SDK :: Services :: Location diff --git a/services/location/src/main/resources/codegen-resources/customization.config b/services/location/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/location/src/main/resources/codegen-resources/customization.config +++ b/services/location/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lookoutequipment/pom.xml b/services/lookoutequipment/pom.xml index 25143597ca5b..0d2bb89c4b75 100644 --- a/services/lookoutequipment/pom.xml +++ b/services/lookoutequipment/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lookoutequipment AWS Java SDK :: Services :: Lookout Equipment diff --git a/services/lookoutequipment/src/main/resources/codegen-resources/customization.config b/services/lookoutequipment/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/lookoutequipment/src/main/resources/codegen-resources/customization.config +++ b/services/lookoutequipment/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lookoutmetrics/pom.xml b/services/lookoutmetrics/pom.xml index 7046c4905e35..6245eaf87d53 100644 --- a/services/lookoutmetrics/pom.xml +++ b/services/lookoutmetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 
2.31.76-SNAPSHOT lookoutmetrics AWS Java SDK :: Services :: Lookout Metrics diff --git a/services/lookoutmetrics/src/main/resources/codegen-resources/customization.config b/services/lookoutmetrics/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/lookoutmetrics/src/main/resources/codegen-resources/customization.config +++ b/services/lookoutmetrics/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/lookoutvision/pom.xml b/services/lookoutvision/pom.xml index 5caa75ef2724..8fc4983d73de 100644 --- a/services/lookoutvision/pom.xml +++ b/services/lookoutvision/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT lookoutvision AWS Java SDK :: Services :: Lookout Vision diff --git a/services/lookoutvision/src/main/resources/codegen-resources/customization.config b/services/lookoutvision/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/lookoutvision/src/main/resources/codegen-resources/customization.config +++ b/services/lookoutvision/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/m2/pom.xml b/services/m2/pom.xml index a5cbc5b5d577..d0502288ba4c 100644 --- a/services/m2/pom.xml +++ b/services/m2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT m2 AWS Java SDK :: Services :: M2 diff --git a/services/m2/src/main/resources/codegen-resources/customization.config b/services/m2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/m2/src/main/resources/codegen-resources/customization.config +++ b/services/m2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/machinelearning/pom.xml b/services/machinelearning/pom.xml index c370779d2015..a75d9d23565f 100644 --- a/services/machinelearning/pom.xml +++ b/services/machinelearning/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT machinelearning AWS Java SDK :: Services :: Amazon Machine Learning diff --git a/services/machinelearning/src/main/resources/codegen-resources/customization.config b/services/machinelearning/src/main/resources/codegen-resources/customization.config index 9033eee0c7c5..4798c2627d0c 100644 --- a/services/machinelearning/src/main/resources/codegen-resources/customization.config +++ b/services/machinelearning/src/main/resources/codegen-resources/customization.config @@ -9,6 +9,5 @@ "software.amazon.awssdk.services.machinelearning.internal.PredictEndpointInterceptor", "software.amazon.awssdk.services.machinelearning.internal.RandomIdInterceptor" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/macie2/pom.xml b/services/macie2/pom.xml index 90449ea9382e..37e2c14349f2 100644 --- a/services/macie2/pom.xml +++ b/services/macie2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT macie2 AWS Java SDK :: Services :: Macie2 diff --git a/services/macie2/src/main/resources/codegen-resources/customization.config b/services/macie2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/macie2/src/main/resources/codegen-resources/customization.config +++ 
b/services/macie2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mailmanager/pom.xml b/services/mailmanager/pom.xml index c1a5972854f6..f6fbb5a10e31 100644 --- a/services/mailmanager/pom.xml +++ b/services/mailmanager/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mailmanager AWS Java SDK :: Services :: Mail Manager diff --git a/services/mailmanager/src/main/resources/codegen-resources/customization.config b/services/mailmanager/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/mailmanager/src/main/resources/codegen-resources/customization.config +++ b/services/mailmanager/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/managedblockchain/pom.xml b/services/managedblockchain/pom.xml index 2b776dff784d..edec61967f41 100644 --- a/services/managedblockchain/pom.xml +++ b/services/managedblockchain/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT managedblockchain AWS Java SDK :: Services :: ManagedBlockchain diff --git a/services/managedblockchain/src/main/resources/codegen-resources/customization.config b/services/managedblockchain/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/managedblockchain/src/main/resources/codegen-resources/customization.config +++ b/services/managedblockchain/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/managedblockchainquery/pom.xml b/services/managedblockchainquery/pom.xml index 
04f25333f165..aa1f3ff76696 100644 --- a/services/managedblockchainquery/pom.xml +++ b/services/managedblockchainquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT managedblockchainquery AWS Java SDK :: Services :: Managed Blockchain Query diff --git a/services/marketplaceagreement/pom.xml b/services/marketplaceagreement/pom.xml index 66db10d277ea..b724f17f20b1 100644 --- a/services/marketplaceagreement/pom.xml +++ b/services/marketplaceagreement/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplaceagreement AWS Java SDK :: Services :: Marketplace Agreement diff --git a/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config b/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config +++ b/services/marketplaceagreement/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplacecatalog/pom.xml b/services/marketplacecatalog/pom.xml index 8d9df3752118..6fafea1ac515 100644 --- a/services/marketplacecatalog/pom.xml +++ b/services/marketplacecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplacecatalog AWS Java SDK :: Services :: Marketplace Catalog diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/customization.config b/services/marketplacecatalog/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/marketplacecatalog/src/main/resources/codegen-resources/customization.config +++ 
b/services/marketplacecatalog/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json index c7dd96bfef1b..10e7ca80a538 100644 --- a/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json +++ b/services/marketplacecatalog/src/main/resources/codegen-resources/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"catalog.marketplace", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"AWS Marketplace Catalog", "serviceFullName":"AWS Marketplace Catalog Service", "serviceId":"Marketplace Catalog", "signatureVersion":"v4", "signingName":"aws-marketplace", - "uid":"marketplace-catalog-2018-09-17" + "uid":"marketplace-catalog-2018-09-17", + "auth":["aws.auth#sigv4"] }, "operations":{ "BatchDescribeEntities":{ @@ -200,7 +202,7 @@ {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

        Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error.

        For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1).

        For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products.

        " + "documentation":"

        Allows you to request changes for your entities. Within a single ChangeSet, you can't start the same change type against the same entity multiple times. Additionally, when a ChangeSet is running, all the entities targeted by the different changes are locked until the change set has completed (either succeeded, cancelled, or failed). If you try to start a change set containing a change against an entity that is already locked, you will receive a ResourceInUseException error.

        For example, you can't start the ChangeSet described in the example later in this topic because it contains two changes to run the same change type (AddRevisions) against the same entity (entity-id@1).

        For more information about working with change sets, see Working with change sets. For information about change types for single-AMI products, see Working with single-AMI products. Also, for more information about change types available for container-based products, see Working with container products.

        To download \"DetailsDocument\" shapes, see Python and Java shapes on GitHub.

        " }, "TagResource":{ "name":"TagResource", @@ -524,7 +526,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        Alternative field that accepts a JSON value instead of a string for ChangeType details. You can use either Details or DetailsDocument, but not both.

        " + "documentation":"

        Alternative field that accepts a JSON value instead of a string for ChangeType details. You can use either Details or DetailsDocument, but not both.

        To download the \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " }, "ChangeName":{ "shape":"ChangeName", @@ -618,7 +620,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        The JSON value of the details specific to the change type of the requested change.

        " + "documentation":"

        The JSON value of the details specific to the change type of the requested change.

        To download the \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " }, "ErrorDetailList":{ "shape":"ErrorDetailList", @@ -976,8 +978,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeChangeSetRequest":{ "type":"structure", @@ -1091,7 +1092,7 @@ }, "DetailsDocument":{ "shape":"JsonDocumentType", - "documentation":"

        The JSON value of the details specific to the entity.

        " + "documentation":"

        The JSON value of the details specific to the entity.

        To download \"DetailsDocument\" shapes, see the Python and Java shapes on GitHub.

        " } } }, @@ -1227,7 +1228,8 @@ "ResaleAuthorizationSummary":{ "shape":"ResaleAuthorizationSummary", "documentation":"

        An object that contains summary information about the Resale Authorization.

        " - } + }, + "MachineLearningProductSummary":{"shape":"MachineLearningProductSummary"} }, "documentation":"

        This object is a container for common summary information about the entity. The summary doesn't contain the whole entity structure, but it does contain information common across all entities.

        " }, @@ -1267,7 +1269,8 @@ "ResaleAuthorizationFilters":{ "shape":"ResaleAuthorizationFilters", "documentation":"

        A filter for Resale Authorizations.

        " - } + }, + "MachineLearningProductFilters":{"shape":"MachineLearningProductFilters"} }, "documentation":"

        Object containing all the filter fields per entity type.

        ", "union":true @@ -1298,7 +1301,8 @@ "ResaleAuthorizationSort":{ "shape":"ResaleAuthorizationSort", "documentation":"

        A sort for Resale Authorizations.

        " - } + }, + "MachineLearningProductSort":{"shape":"MachineLearningProductSort"} }, "documentation":"

        Object containing all the sort fields per entity type.

        ", "union":true @@ -1429,8 +1433,7 @@ }, "JsonDocumentType":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "ListChangeSetsMaxResultInteger":{ @@ -1565,6 +1568,169 @@ } } }, + "MachineLearningProductEntityIdFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductEntityIdFilterValueList", + "documentation":"

        A list of entity IDs to filter by. The operation returns machine learning products with entity IDs that match the values in this list.

        " + } + }, + "documentation":"

        The filter for machine learning product entity IDs.

        " + }, + "MachineLearningProductEntityIdFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductEntityIdString"}, + "documentation":"

        A list of entity ID values to filter by. You can include up to 10 entity IDs in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductEntityIdString":{ + "type":"string", + "documentation":"

        The entity ID of a machine learning product. This string uniquely identifies the product.

        ", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9][.a-zA-Z0-9/-]+[a-zA-Z0-9]$" + }, + "MachineLearningProductFilters":{ + "type":"structure", + "members":{ + "EntityId":{ + "shape":"MachineLearningProductEntityIdFilter", + "documentation":"

        Filter machine learning products by their entity IDs.

        " + }, + "LastModifiedDate":{ + "shape":"MachineLearningProductLastModifiedDateFilter", + "documentation":"

        Filter machine learning products by their last modified date.

        " + }, + "ProductTitle":{ + "shape":"MachineLearningProductTitleFilter", + "documentation":"

        Filter machine learning products by their product titles.

        " + }, + "Visibility":{ + "shape":"MachineLearningProductVisibilityFilter", + "documentation":"

        Filter machine learning products by their visibility status.

        " + } + }, + "documentation":"

        The filters that you can use with the ListEntities operation to filter machine learning products. You can filter by EntityId, LastModifiedDate, ProductTitle, and Visibility.

        " + }, + "MachineLearningProductLastModifiedDateFilter":{ + "type":"structure", + "members":{ + "DateRange":{ + "shape":"MachineLearningProductLastModifiedDateFilterDateRange", + "documentation":"

        A date range to filter by. The operation returns machine learning products with last modified dates that fall within this range.

        " + } + }, + "documentation":"

        The filter for machine learning product last modified date.

        " + }, + "MachineLearningProductLastModifiedDateFilterDateRange":{ + "type":"structure", + "members":{ + "AfterValue":{ + "shape":"DateTimeISO8601", + "documentation":"

        The start date (inclusive) of the date range. The operation returns machine learning products with last modified dates on or after this date.

        " + }, + "BeforeValue":{ + "shape":"DateTimeISO8601", + "documentation":"

        The end date (inclusive) of the date range. The operation returns machine learning products with last modified dates on or before this date.

        " + } + }, + "documentation":"

        A date range for filtering machine learning products by their last modified date.

        " + }, + "MachineLearningProductSort":{ + "type":"structure", + "members":{ + "SortBy":{ + "shape":"MachineLearningProductSortBy", + "documentation":"

        The field to sort by. Valid values: EntityId, LastModifiedDate, ProductTitle, and Visibility.

        " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

        The sort order. Valid values are ASC (ascending) and DESC (descending).

        " + } + }, + "documentation":"

        The sort options for machine learning products.

        " + }, + "MachineLearningProductSortBy":{ + "type":"string", + "documentation":"

        The fields that you can sort machine learning products by.

        ", + "enum":[ + "EntityId", + "LastModifiedDate", + "ProductTitle", + "Visibility" + ] + }, + "MachineLearningProductSummary":{ + "type":"structure", + "members":{ + "ProductTitle":{ + "shape":"MachineLearningProductTitleString", + "documentation":"

        The title of the machine learning product.

        " + }, + "Visibility":{ + "shape":"MachineLearningProductVisibilityString", + "documentation":"

        The visibility status of the machine learning product. Valid values are Limited, Public, Restricted, and Draft.

        " + } + }, + "documentation":"

        A summary of a machine learning product.

        " + }, + "MachineLearningProductTitleFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductTitleFilterValueList", + "documentation":"

        A list of product titles to filter by. The operation returns machine learning products with titles that exactly match the values in this list.

        " + }, + "WildCardValue":{ + "shape":"MachineLearningProductTitleString", + "documentation":"

        A wildcard value to filter product titles. The operation returns machine learning products with titles that match this wildcard pattern.

        " + } + }, + "documentation":"

        The filter for machine learning product titles.

        " + }, + "MachineLearningProductTitleFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductTitleString"}, + "documentation":"

        A list of product title values to filter by. You can include up to 10 product titles in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductTitleString":{ + "type":"string", + "documentation":"

        The title of a machine learning product.

        ", + "max":255, + "min":1, + "pattern":"^(.)+$" + }, + "MachineLearningProductVisibilityFilter":{ + "type":"structure", + "members":{ + "ValueList":{ + "shape":"MachineLearningProductVisibilityFilterValueList", + "documentation":"

        A list of visibility values to filter by. The operation returns machine learning products with visibility status that match the values in this list.

        " + } + }, + "documentation":"

        The filter for machine learning product visibility status.

        " + }, + "MachineLearningProductVisibilityFilterValueList":{ + "type":"list", + "member":{"shape":"MachineLearningProductVisibilityString"}, + "documentation":"

        A list of visibility status values to filter by. You can include up to 10 visibility status values in this list.

        ", + "max":10, + "min":1 + }, + "MachineLearningProductVisibilityString":{ + "type":"string", + "documentation":"

        The visibility status of a machine learning product. Valid values are:

        • Limited - The product is available to a limited set of buyers.

        • Public - The product is publicly available to all buyers.

        • Restricted - The product has restricted availability.

        • Draft - The product is in draft state and not yet available to buyers.

        ", + "enum":[ + "Limited", + "Public", + "Restricted", + "Draft" + ] + }, "NextToken":{ "type":"string", "max":2048, @@ -1956,8 +2122,7 @@ }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RequestedChangeList":{ "type":"list", @@ -2836,8 +3001,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2874,8 +3038,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ValidationException":{ "type":"structure", @@ -2900,5 +3063,5 @@ "pattern":"^[a-zA-Z]+$" } }, - "documentation":"

        Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

        You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

        " + "documentation":"

        Catalog API actions allow you to manage your entities through list, describe, and update capabilities. An entity can be a product or an offer on AWS Marketplace.

        You can automate your entity update process by integrating the AWS Marketplace Catalog API with your AWS Marketplace product build or deployment pipelines. You can also create your own applications on top of the Catalog API to manage your products on AWS Marketplace.

        " } diff --git a/services/marketplacecommerceanalytics/pom.xml b/services/marketplacecommerceanalytics/pom.xml index 1018409dfd80..a9b089341e7f 100644 --- a/services/marketplacecommerceanalytics/pom.xml +++ b/services/marketplacecommerceanalytics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplacecommerceanalytics AWS Java SDK :: Services :: AWS Marketplace Commerce Analytics diff --git a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config index 87ce1af89b19..af7a1033611e 100644 --- a/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacecommerceanalytics/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "renameShapes": { "MarketplaceCommerceAnalyticsException": "MarketplaceCommerceAnalyticsServiceException" }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplacedeployment/pom.xml b/services/marketplacedeployment/pom.xml index f90b4ecd0ecf..0c6e9078d8da 100644 --- a/services/marketplacedeployment/pom.xml +++ b/services/marketplacedeployment/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplacedeployment AWS Java SDK :: Services :: Marketplace Deployment diff --git a/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config b/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacedeployment/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - 
"enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplaceentitlement/pom.xml b/services/marketplaceentitlement/pom.xml index 4080a9252144..fe8864622fa8 100644 --- a/services/marketplaceentitlement/pom.xml +++ b/services/marketplaceentitlement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplaceentitlement AWS Java SDK :: Services :: AWS Marketplace Entitlement diff --git a/services/marketplaceentitlement/src/main/resources/codegen-resources/customization.config b/services/marketplaceentitlement/src/main/resources/codegen-resources/customization.config index f94b86c040fe..6d810faea5bc 100644 --- a/services/marketplaceentitlement/src/main/resources/codegen-resources/customization.config +++ b/services/marketplaceentitlement/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplacemetering/pom.xml b/services/marketplacemetering/pom.xml index b932941a93c0..257c36e099c3 100644 --- a/services/marketplacemetering/pom.xml +++ b/services/marketplacemetering/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplacemetering AWS Java SDK :: Services :: AWS Marketplace Metering Service diff --git a/services/marketplacemetering/src/main/resources/codegen-resources/customization.config b/services/marketplacemetering/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/marketplacemetering/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacemetering/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - 
"enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/marketplacereporting/pom.xml b/services/marketplacereporting/pom.xml index 1c2052969374..a176a7869665 100644 --- a/services/marketplacereporting/pom.xml +++ b/services/marketplacereporting/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT marketplacereporting AWS Java SDK :: Services :: Marketplace Reporting diff --git a/services/marketplacereporting/src/main/resources/codegen-resources/customization.config b/services/marketplacereporting/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/marketplacereporting/src/main/resources/codegen-resources/customization.config +++ b/services/marketplacereporting/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/mediaconnect/pom.xml b/services/mediaconnect/pom.xml index 62b36f7ceba3..ea04912a2b19 100644 --- a/services/mediaconnect/pom.xml +++ b/services/mediaconnect/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mediaconnect AWS Java SDK :: Services :: MediaConnect diff --git a/services/mediaconnect/src/main/resources/codegen-resources/customization.config b/services/mediaconnect/src/main/resources/codegen-resources/customization.config index a9ab9ada4bb2..cf0eeb89dc03 100644 --- a/services/mediaconnect/src/main/resources/codegen-resources/customization.config +++ b/services/mediaconnect/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "listEntitlements", "listFlows" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json index 
26b20431cfbf..3dec52ad2919 100644 --- a/services/mediaconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconnect/src/main/resources/codegen-resources/service-2.json @@ -26,8 +26,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -46,8 +46,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -65,8 +65,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -85,8 +85,8 @@ {"shape":"BadRequestException"}, {"shape":"AddFlowOutputs420Exception"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -104,8 +104,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -123,8 +123,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, 
{"shape":"ServiceUnavailableException"} ], @@ -144,8 +144,8 @@ {"shape":"CreateBridge420Exception"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

        Creates a new bridge. The request must include one source.

        " @@ -162,8 +162,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"}, {"shape":"CreateFlow420Exception"} ], @@ -182,9 +182,9 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, - {"shape":"ForbiddenException"}, {"shape":"CreateGateway420Exception"}, + {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"ServiceUnavailableException"} ], "documentation":"

        Creates a new gateway. The request must include at least one network (up to four).

        " @@ -202,8 +202,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -222,8 +222,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -243,8 +243,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -264,8 +264,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -285,8 +285,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -304,8 +304,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -323,8 +323,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - 
{"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -342,8 +342,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -362,8 +362,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -382,8 +382,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -438,8 +438,8 @@ {"shape":"BadRequestException"}, {"shape":"GrantFlowEntitlements420Exception"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -595,8 +595,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -615,8 +615,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, 
{"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -636,8 +636,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -656,8 +656,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -676,8 +676,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -696,8 +696,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -716,8 +716,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -736,8 +736,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -756,8 +756,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - 
{"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -775,8 +775,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -826,8 +826,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -847,8 +847,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -868,8 +868,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -889,8 +889,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -909,8 +909,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, 
{"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -929,8 +929,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -949,8 +949,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -969,8 +969,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -989,8 +989,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -1010,8 +1010,8 @@ {"shape":"BadRequestException"}, {"shape":"TooManyRequestsException"}, {"shape":"ConflictException"}, - {"shape":"InternalServerErrorException"}, {"shape":"ForbiddenException"}, + {"shape":"InternalServerErrorException"}, {"shape":"NotFoundException"}, {"shape":"ServiceUnavailableException"} ], @@ -2913,7 +2913,7 @@ "members":{ "ChannelOrder":{ "shape":"String", - "documentation":"

        The format of the audio channel.

        ", + "documentation":"

        The format of the audio channel.

        ", "locationName":"channelOrder" }, "Colorimetry":{ @@ -4379,6 +4379,11 @@ "shape":"OutputStatus", "documentation":"

        An indication of whether the output is transmitting data or not.

        ", "locationName":"outputStatus" + }, + "PeerIpAddress":{ + "shape":"String", + "documentation":"

        The IP address of the device that is currently receiving content from this output.

        • For outputs that use protocols where you specify the destination (such as SRT Caller or Zixi Push), this value matches the configured destination address.

        • For outputs that use listener protocols (such as SRT Listener), this value shows the address of the connected receiver.

        • Peer IP addresses aren't available for entitlements, managed MediaLive outputs, NDI outputs, and CDI/ST2110 outputs.

        • The peer IP address might not be visible for flows that haven't been started yet, or flows that were started before May 2025. In these cases, restart your flow to see the peer IP address.

        ", + "locationName":"peerIpAddress" } }, "documentation":"

        The settings for an output.

        " @@ -5115,6 +5120,11 @@ "shape":"GatewayBridgeSource", "documentation":"

        The source configuration for cloud flows receiving a stream from a bridge.

        ", "locationName":"gatewayBridgeSource" + }, + "PeerIpAddress":{ + "shape":"String", + "documentation":"

        The IP address of the device that is currently sending content to this source.

        • For sources that use protocols where you specify the origin (such as SRT Caller), this value matches the configured origin address.

        • For sources that use listener protocols (such as SRT Listener or RTP), this value shows the address of the connected sender.

        • Peer IP addresses aren't available for entitlements and CDI/ST2110 sources.

        • The peer IP address might not be visible for flows that haven't been started yet, or flows that were started before May 2025. In these cases, restart your flow to see the peer IP address.

        ", + "locationName":"peerIpAddress" } }, "documentation":"

        The settings for the source of the flow.

        " diff --git a/services/mediaconvert/pom.xml b/services/mediaconvert/pom.xml index 10d2e473ba20..48291cd91c6f 100644 --- a/services/mediaconvert/pom.xml +++ b/services/mediaconvert/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 mediaconvert diff --git a/services/mediaconvert/src/main/resources/codegen-resources/customization.config b/services/mediaconvert/src/main/resources/codegen-resources/customization.config index 4797aaf0e76e..da22aaad9103 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/customization.config +++ b/services/mediaconvert/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "*" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json index d71ee0f6d1e8..fee2ca9a381b 100644 --- a/services/mediaconvert/src/main/resources/codegen-resources/service-2.json +++ b/services/mediaconvert/src/main/resources/codegen-resources/service-2.json @@ -1724,8 +1724,7 @@ }, "AssociateCertificateResponse": { "type": "structure", - "members": { - } + "members": {} }, "AudioChannelTag": { "type": "string", @@ -2116,7 +2115,7 @@ "SelectorType": { "shape": "AudioSelectorType", "locationName": "selectorType", - "documentation": "Specifies the type of the audio selector." + "documentation": "Specify how MediaConvert selects audio content within your input. The default is Track. PID: Select audio by specifying the Packet Identifier (PID) values for MPEG Transport Stream inputs. Use this when you know the exact PID values of your audio streams. Track: Default. Select audio by track number. This is the most common option and works with most input container formats. 
Language code: Select audio by language using ISO 639-2 or ISO 639-3 three-letter language codes. Use this when your source has embedded language metadata and you want to select tracks based on their language. HLS rendition group: Select audio from an HLS rendition group. Use this when your input is an HLS package with multiple audio renditions and you want to select specific rendition groups. All PCM: Select all uncompressed PCM audio tracks from your input automatically. This is useful when you want to include all PCM audio tracks without specifying individual track numbers." }, "Tracks": { "shape": "__listOf__integerMin1Max2147483647", @@ -2139,7 +2138,7 @@ }, "AudioSelectorType": { "type": "string", - "documentation": "Specifies the type of the audio selector.", + "documentation": "Specify how MediaConvert selects audio content within your input. The default is Track. PID: Select audio by specifying the Packet Identifier (PID) values for MPEG Transport Stream inputs. Use this when you know the exact PID values of your audio streams. Track: Default. Select audio by track number. This is the most common option and works with most input container formats. Language code: Select audio by language using ISO 639-2 or ISO 639-3 three-letter language codes. Use this when your source has embedded language metadata and you want to select tracks based on their language. HLS rendition group: Select audio from an HLS rendition group. Use this when your input is an HLS package with multiple audio renditions and you want to select specific rendition groups. All PCM: Select all uncompressed PCM audio tracks from your input automatically. 
This is useful when you want to include all PCM audio tracks without specifying individual track numbers.", "enum": [ "PID", "TRACK", @@ -2354,7 +2353,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. 
When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "QvbrSettings": { "shape": "Av1QvbrSettings", @@ -2486,7 +2485,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. 
Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." 
}, "ScanTypeConversionMode": { "shape": "AvcIntraScanTypeConversionMode", @@ -2846,8 +2845,7 @@ }, "CancelJobResponse": { "type": "structure", - "members": { - } + "members": {} }, "CaptionDescription": { "type": "structure", @@ -3099,6 +3097,14 @@ "WEBVTT" ] }, + "CaptionSourceUpconvertSTLToTeletext": { + "type": "string", + "documentation": "Specify whether this set of input captions appears in your outputs in both STL and Teletext format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the STL data through using the Teletext compatibility bytes fields of the Teletext wrapper, and it also translates the STL data into Teletext.", + "enum": [ + "UPCONVERT", + "DISABLED" + ] + }, "ChannelMapping": { "type": "structure", "members": { @@ -4639,18 +4645,15 @@ }, "DeleteJobTemplateResponse": { "type": "structure", - "members": { - } + "members": {} }, "DeletePolicyRequest": { "type": "structure", - "members": { - } + "members": {} }, "DeletePolicyResponse": { "type": "structure", - "members": { - } + "members": {} }, "DeletePresetRequest": { "type": "structure", @@ -4668,8 +4671,7 @@ }, "DeletePresetResponse": { "type": "structure", - "members": { - } + "members": {} }, "DeleteQueueRequest": { "type": "structure", @@ -4687,8 +4689,7 @@ }, "DeleteQueueResponse": { "type": "structure", - "members": { - } + "members": {} }, "DescribeEndpointsMode": { "type": "string", @@ -4766,8 +4767,7 @@ }, "DisassociateCertificateResponse": { "type": "structure", - "members": { - } + "members": {} }, "DolbyVision": { "type": "structure", @@ -4917,7 +4917,7 @@ "DdsHandling": { "shape": "DvbddsHandling", "locationName": "ddsHandling", - "documentation": "Specify how MediaConvert handles the display definition segment (DDS). To exclude the DDS from this set of captions: Keep the default, None. To include the DDS: Choose Specified. When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate. 
To include the DDS, but not include display window data: Choose No display window. When you do, you can write position metadata to the page composition segment (PCS) with DDS x-coordinate and DDS y-coordinate. For video resolutions with a height of 576 pixels or less, MediaConvert doesn't include the DDS, regardless of the value you choose for DDS handling. All burn-in and DVB-Sub font settings must match." + "documentation": "Specify how MediaConvert handles the display definition segment (DDS). To exclude the DDS from this set of captions: Keep the default, None. To include the DDS: Choose Specified. When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate. To include the DDS, but not include display window data: Choose No display window. When you do, you can write position metadata to the page composition segment (PCS) with DDS x-coordinate and DDS y-coordinate. For video resolutions with a height of 576 pixels or less, MediaConvert doesn't include the DDS, regardless of the value you choose for DDS handling. All burn-in and DVB-Sub font settings must match. To include the DDS, with optimized subtitle placement and reduced data overhead: We recommend that you choose Specified (optimal). This option provides the same visual positioning as Specified while using less bandwidth. This also supports resolutions higher than 1080p while maintaining full DVB-Sub compatibility. When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate." }, "DdsXCoordinate": { "shape": "__integerMin0Max2147483647", @@ -5176,11 +5176,12 @@ }, "DvbddsHandling": { "type": "string", - "documentation": "Specify how MediaConvert handles the display definition segment (DDS). To exclude the DDS from this set of captions: Keep the default, None. To include the DDS: Choose Specified. 
When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate. To include the DDS, but not include display window data: Choose No display window. When you do, you can write position metadata to the page composition segment (PCS) with DDS x-coordinate and DDS y-coordinate. For video resolutions with a height of 576 pixels or less, MediaConvert doesn't include the DDS, regardless of the value you choose for DDS handling. All burn-in and DVB-Sub font settings must match.", + "documentation": "Specify how MediaConvert handles the display definition segment (DDS). To exclude the DDS from this set of captions: Keep the default, None. To include the DDS: Choose Specified. When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate. To include the DDS, but not include display window data: Choose No display window. When you do, you can write position metadata to the page composition segment (PCS) with DDS x-coordinate and DDS y-coordinate. For video resolutions with a height of 576 pixels or less, MediaConvert doesn't include the DDS, regardless of the value you choose for DDS handling. All burn-in and DVB-Sub font settings must match. To include the DDS, with optimized subtitle placement and reduced data overhead: We recommend that you choose Specified (optimal). This option provides the same visual positioning as Specified while using less bandwidth. This also supports resolutions higher than 1080p while maintaining full DVB-Sub compatibility. 
When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate.", "enum": [ "NONE", "SPECIFIED", - "NO_DISPLAY_WINDOW" + "NO_DISPLAY_WINDOW", + "SPECIFIED_OPTIMAL" ] }, "DynamicAudioSelector": { @@ -5888,6 +5889,11 @@ "shape": "FileSourceTimeDeltaUnits", "locationName": "timeDeltaUnits", "documentation": "When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default." + }, + "UpconvertSTLToTeletext": { + "shape": "CaptionSourceUpconvertSTLToTeletext", + "locationName": "upconvertSTLToTeletext", + "documentation": "Specify whether this set of input captions appears in your outputs in both STL and Teletext format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the STL data through using the Teletext compatibility bytes fields of the Teletext wrapper, and it also translates the STL data into Teletext." } }, "documentation": "If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings." @@ -6073,8 +6079,7 @@ }, "GetPolicyRequest": { "type": "structure", - "members": { - } + "members": {} }, "GetPolicyResponse": { "type": "structure", @@ -6542,7 +6547,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. 
When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. 
You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "QualityTuningLevel": { "shape": "H264QualityTuningLevel", @@ -7041,7 +7046,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. 
You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "QualityTuningLevel": { "shape": "H265QualityTuningLevel", @@ -8636,7 +8641,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." 
+ "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInput", @@ -8807,7 +8812,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." 
}, "Inputs": { "shape": "__listOfInputTemplate", @@ -10198,6 +10203,14 @@ }, "documentation": "Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3." }, + "Mp4C2paManifest": { + "type": "string", + "documentation": "When enabled, a C2PA compliant manifest will be generated, signed and embeded in the output. For more information on C2PA, see https://c2pa.org/specifications/specifications/2.1/index.html", + "enum": [ + "INCLUDE", + "EXCLUDE" + ] + }, "Mp4CslgAtom": { "type": "string", "documentation": "When enabled, file composition times will start at zero, composition times in the 'ctts' (composition time to sample) box for B-frames will be negative, and a 'cslg' (composition shift least greatest) box will be included per 14496-1 amendment 1. This improves compatibility with Apple players and tools.", @@ -10230,6 +10243,16 @@ "locationName": "audioDuration", "documentation": "Specify this setting only when your output will be consumed by a downstream repackaging workflow that is sensitive to very small duration differences between video and audio. For this situation, choose Match video duration. In all other cases, keep the default value, Default codec duration. When you choose Match video duration, MediaConvert pads the output audio streams with silence or trims them to ensure that the total duration of each audio stream is at least as long as the total duration of the video stream. After padding or trimming, the audio stream duration is no more than one frame longer than the video stream. MediaConvert applies audio padding or trimming only to the end of the last segment of the output. For unsegmented outputs, MediaConvert adds padding only to the end of the file. When you keep the default value, any minor discrepancies between audio and video duration will depend on your output audio codec." 
}, + "C2paManifest": { + "shape": "Mp4C2paManifest", + "locationName": "c2paManifest", + "documentation": "When enabled, a C2PA compliant manifest will be generated, signed and embeded in the output. For more information on C2PA, see https://c2pa.org/specifications/specifications/2.1/index.html" + }, + "CertificateSecret": { + "shape": "__stringMin1Max2048PatternArnAZSecretsmanagerWD12SecretAZAZ09", + "locationName": "certificateSecret", + "documentation": "Specify the name or ARN of the AWS Secrets Manager secret that contains your C2PA public certificate chain in PEM format. Provide a valid secret name or ARN. Note that your MediaConvert service role must allow access to this secret. The public certificate chain is added to the COSE header (x5chain) for signature validation. Include the signer's certificate and all intermediate certificates. Do not include the root certificate. For details on COSE, see: https://opensource.contentauthenticity.org/docs/manifest/signing-manifests" + }, "CslgAtom": { "shape": "Mp4CslgAtom", "locationName": "cslgAtom", @@ -10254,6 +10277,11 @@ "shape": "__string", "locationName": "mp4MajorBrand", "documentation": "Overrides the \"Major Brand\" field in the output file. Usually not necessary to specify." + }, + "SigningKmsKey": { + "shape": "__stringMin1PatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932", + "locationName": "signingKmsKey", + "documentation": "Specify the ID or ARN of the AWS KMS key used to sign the C2PA manifest in your MP4 output. Provide a valid KMS key ARN. Note that your MediaConvert service role must allow access to this key." } }, "documentation": "These settings relate to your MP4 output container. You can create audio only outputs with this container. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/supported-codecs-containers-audio-only.html#output-codecs-and-containers-supported-for-audio-only." 
@@ -10637,7 +10665,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. 
For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "QualityTuningLevel": { "shape": "Mpeg2QualityTuningLevel", @@ -11337,7 +11365,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. 
You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "Type": { "shape": "OutputGroupType", @@ -11712,7 +11740,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. 
You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. 
Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "ScanTypeConversionMode": { "shape": "ProresScanTypeConversionMode", @@ -12438,8 +12466,7 @@ }, "TagResourceResponse": { "type": "structure", - "members": { - } + "members": {} }, "TeletextDestinationSettings": { "type": "structure", @@ -12884,8 +12911,7 @@ }, "UntagResourceResponse": { "type": "structure", - "members": { - } + "members": {} }, "UpdateJobTemplateRequest": { "type": "structure", @@ -13358,6 +13384,11 @@ "VideoOverlay": { "type": "structure", "members": { + "Crop": { + "shape": "VideoOverlayCrop", + "locationName": "crop", + "documentation": "Specify a rectangle of content to crop and use from your video overlay's input video. When you do, MediaConvert uses the cropped dimensions that you specify under X offset, Y offset, Width, and Height." + }, "EndTimecode": { "shape": "__stringPattern010920405090509092", "locationName": "endTimecode", @@ -13391,6 +13422,37 @@ }, "documentation": "Overlay one or more videos on top of your input video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-overlays.html" }, + "VideoOverlayCrop": { + "type": "structure", + "members": { + "Height": { + "shape": "__integerMin0Max2147483647", + "locationName": "height", + "documentation": "Specify the height of the video overlay cropping rectangle. To use the same height as your overlay input video: Keep blank, or enter 0. 
To specify a different height for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels high. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 108 pixels high." + }, + "Unit": { + "shape": "VideoOverlayUnit", + "locationName": "unit", + "documentation": "Specify the Unit type to use when you enter a value for X position, Y position, Width, or Height. You can choose Pixels or Percentage. Leave blank to use the default value, Pixels." + }, + "Width": { + "shape": "__integerMin0Max2147483647", + "locationName": "width", + "documentation": "Specify the width of the video overlay cropping rectangle. To use the same width as your overlay input video: Keep blank, or enter 0. To specify a different width for the cropping rectangle: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 100 and choose Pixels, the cropping rectangle will 100 pixels wide. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be 192 pixels wide." + }, + "X": { + "shape": "__integerMin0Max2147483647", + "locationName": "x", + "documentation": "Specify the distance between the cropping rectangle and the left edge of your overlay video's frame. To position the cropping rectangle along the left edge: Keep blank, or enter 0. To position the cropping rectangle to the right, relative to the left edge of your overlay video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, the cropping rectangle will be positioned 10 pixels from the left edge of the overlay video's frame. 
When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be positioned 192 pixels from the left edge of the overlay video's frame." + }, + "Y": { + "shape": "__integerMin0Max2147483647", + "locationName": "y", + "documentation": "Specify the distance between the cropping rectangle and the top edge of your overlay video's frame. To position the cropping rectangle along the top edge: Keep blank, or enter 0. To position the cropping rectangle down, relative to the top edge of your overlay video's frame: Enter an integer representing the Unit type that you choose, either Pixels or Percentage. For example, when you enter 10 and choose Pixels, the cropping rectangle will be positioned 10 pixels from the top edge of the overlay video's frame. When you enter 10, choose Percentage, and your overlay input video is 1920x1080, the cropping rectangle will be positioned 108 pixels from the top edge of the overlay video's frame." + } + }, + "documentation": "Specify a rectangle of content to crop and use from your video overlay's input video. When you do, MediaConvert uses the cropped dimensions that you specify under X offset, Y offset, Width, and Height." + }, "VideoOverlayInput": { "type": "structure", "members": { @@ -14351,7 +14413,7 @@ "PerFrameMetrics": { "shape": "__listOfFrameMetricType", "locationName": "perFrameMetrics", - "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the video, video codec, and metric type. 
For example: video_h264_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." + "documentation": "Optionally choose one or more per frame metric reports to generate along with your output. You can use these metrics to analyze your video output according to one or more commonly used image quality metrics. You can specify per frame metrics for output groups or for individual outputs. When you do, MediaConvert writes a CSV (Comma-Separated Values) file to your S3 output destination, named after the output name and metric type. For example: videofile_PSNR.csv Jobs that generate per frame metrics will take longer to complete, depending on the resolution and complexity of your output. For example, some 4K jobs might take up to twice as long to complete. Note that when analyzing the video quality of your output, or when comparing the video quality of multiple different outputs, we generally also recommend a detailed visual review in a controlled environment. 
You can choose from the following per frame metrics: * PSNR: Peak Signal-to-Noise Ratio * SSIM: Structural Similarity Index Measure * MS_SSIM: Multi-Scale Similarity Index Measure * PSNR_HVS: Peak Signal-to-Noise Ratio, Human Visual System * VMAF: Video Multi-Method Assessment Fusion * QVBR: Quality-Defined Variable Bitrate. This option is only available when your output uses the QVBR rate control mode." }, "Profile": { "shape": "XavcProfile", @@ -15447,6 +15509,11 @@ "max": 50, "pattern": "^[a-zA-Z0-9_\\/_+=.@-]*$" }, + "__stringMin1PatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912MrkAFAF0932": { + "type": "string", + "min": 1, + "pattern": "^(arn:aws(-us-gov|-cn)?:kms:[a-z-]{2,6}-(east|west|central|((north|south)(east|west)?))-[1-9]{1,2}:\\d{12}:key/)?[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}|mrk-[a-fA-F0-9]{32}$" + }, "__stringMin24Max512PatternAZaZ0902": { "type": "string", "min": 24, diff --git a/services/medialive/pom.xml b/services/medialive/pom.xml index b5fa5d494120..a4f4e8807f45 100644 --- a/services/medialive/pom.xml +++ b/services/medialive/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 medialive diff --git a/services/medialive/src/main/resources/codegen-resources/customization.config b/services/medialive/src/main/resources/codegen-resources/customization.config index 0e60128f3a1d..34826c5c0209 100644 --- a/services/medialive/src/main/resources/codegen-resources/customization.config +++ b/services/medialive/src/main/resources/codegen-resources/customization.config @@ -11,6 +11,5 @@ "createInput", "createInputSecurityGroup" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/medialive/src/main/resources/codegen-resources/service-2.json b/services/medialive/src/main/resources/codegen-resources/service-2.json 
index 49940d0c24c1..70fd86c4fc9f 100644 --- a/services/medialive/src/main/resources/codegen-resources/service-2.json +++ b/services/medialive/src/main/resources/codegen-resources/service-2.json @@ -16157,6 +16157,11 @@ "shape": "__listOfSrtOutputDestinationSettings", "locationName": "srtSettings", "documentation": "SRT settings for an SRT output; one destination for each redundant encoder." + }, + "LogicalInterfaceNames": { + "shape": "__listOf__string", + "locationName": "logicalInterfaceNames", + "documentation": "Optional assignment of an output to a logical interface on the Node. Only applies to on premises channels." } }, "documentation": "Placeholder documentation for OutputDestination" @@ -25821,6 +25826,16 @@ "shape": "TimecodeBurninSettings", "locationName": "timecodeBurninSettings", "documentation": "Configures the timecode burn-in feature. If you enable this feature, the timecode will become part of the video." + }, + "Bitrate": { + "shape": "__integerMin50000Max8000000", + "locationName": "bitrate", + "documentation": "Average bitrate in bits/second. Required when the rate control mode is CBR. Not used for QVBR." + }, + "RateControlMode": { + "shape": "Av1RateControlMode", + "locationName": "rateControlMode", + "documentation": "Rate control mode.\n\nQVBR: Quality will match the specified quality level except when it is constrained by the\nmaximum bitrate. Recommended if you or your viewers pay for bandwidth.\n\nCBR: Quality varies, depending on the video complexity. Recommended only if you distribute\nyour assets to devices that cannot handle variable bitrates." 
} }, "documentation": "Av1 Settings", @@ -29254,6 +29269,14 @@ "shape": "CmafIngestCaptionLanguageMapping" }, "documentation": "Placeholder documentation for __listOfCmafIngestCaptionLanguageMapping" + }, + "Av1RateControlMode": { + "type": "string", + "documentation": "Av1 Rate Control Mode", + "enum": [ + "CBR", + "QVBR" + ] } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/services/mediapackage/pom.xml b/services/mediapackage/pom.xml index e09e7fe24d3b..d30ecdb7a06f 100644 --- a/services/mediapackage/pom.xml +++ b/services/mediapackage/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 mediapackage diff --git a/services/mediapackage/src/main/resources/codegen-resources/customization.config b/services/mediapackage/src/main/resources/codegen-resources/customization.config index 6d07f8ab26f7..37a289229927 100644 --- a/services/mediapackage/src/main/resources/codegen-resources/customization.config +++ b/services/mediapackage/src/main/resources/codegen-resources/customization.config @@ -11,6 +11,5 @@ "__AdTriggersElement": "AdTriggersElement", "__PeriodTriggersElement": "PeriodTriggersElement" }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediapackagev2/pom.xml b/services/mediapackagev2/pom.xml index 13962ace1b92..c766d288f891 100644 --- a/services/mediapackagev2/pom.xml +++ b/services/mediapackagev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mediapackagev2 AWS Java SDK :: Services :: Media Package V2 diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/customization.config b/services/mediapackagev2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/mediapackagev2/src/main/resources/codegen-resources/customization.config +++ 
b/services/mediapackagev2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json b/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json index f82591d7989c..e4137b29261b 100644 --- a/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json +++ b/services/mediapackagev2/src/main/resources/codegen-resources/service-2.json @@ -986,6 +986,30 @@ "UtcTiming":{ "shape":"DashUtcTiming", "documentation":"

        Determines the type of UTC timing included in the DASH Media Presentation Description (MPD).

        " + }, + "Profiles":{ + "shape":"DashProfiles", + "documentation":"

        The profile that the output is compliant with.

        " + }, + "BaseUrls":{ + "shape":"DashBaseUrls", + "documentation":"

        The base URLs to use for retrieving segments.

        " + }, + "ProgramInformation":{ + "shape":"DashProgramInformation", + "documentation":"

        Details about the content that you want MediaPackage to pass through in the manifest to the playback device.

        " + }, + "DvbSettings":{ + "shape":"DashDvbSettings", + "documentation":"

        For endpoints that use the DVB-DASH profile only. The font download and error reporting information that you want MediaPackage to pass through to the manifest.

        " + }, + "Compactness":{ + "shape":"DashCompactness", + "documentation":"

        The layout of the DASH manifest that MediaPackage produces. STANDARD indicates a default manifest, which is compacted. NONE indicates a full manifest.

        For information about compactness, see DASH manifest compactness in the Elemental MediaPackage v2 User Guide.

        " + }, + "SubtitleConfiguration":{ + "shape":"DashSubtitleConfiguration", + "documentation":"

        The configuration for DASH subtitles.

        " } }, "documentation":"

        Create a DASH manifest configuration.

        " @@ -1404,6 +1428,64 @@ } } }, + "DashBaseUrl":{ + "type":"structure", + "required":["Url"], + "members":{ + "Url":{ + "shape":"DashBaseUrlUrlString", + "documentation":"

        A source location for segments.

        " + }, + "ServiceLocation":{ + "shape":"DashBaseUrlServiceLocationString", + "documentation":"

        The name of the source location.

        " + }, + "DvbPriority":{ + "shape":"DashBaseUrlDvbPriorityInteger", + "documentation":"

        For use with DVB-DASH profiles only. The priority of this location for serving segments. The lower the number, the higher the priority.

        " + }, + "DvbWeight":{ + "shape":"DashBaseUrlDvbWeightInteger", + "documentation":"

        For use with DVB-DASH profiles only. The weighting for source locations that have the same priority.

        " + } + }, + "documentation":"

        The base URLs to use for retrieving segments. You can specify multiple locations and indicate the priority and weight for when each should be used, for use in multi-CDN workflows.

        " + }, + "DashBaseUrlDvbPriorityInteger":{ + "type":"integer", + "box":true, + "max":15000, + "min":1 + }, + "DashBaseUrlDvbWeightInteger":{ + "type":"integer", + "box":true, + "max":15000, + "min":1 + }, + "DashBaseUrlServiceLocationString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashBaseUrlUrlString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashBaseUrls":{ + "type":"list", + "member":{"shape":"DashBaseUrl"}, + "max":20, + "min":0 + }, + "DashCompactness":{ + "type":"string", + "enum":[ + "STANDARD", + "NONE" + ] + }, "DashDrmSignaling":{ "type":"string", "enum":[ @@ -1411,6 +1493,86 @@ "REFERENCED" ] }, + "DashDvbErrorMetrics":{ + "type":"list", + "member":{"shape":"DashDvbMetricsReporting"}, + "max":20, + "min":0 + }, + "DashDvbFontDownload":{ + "type":"structure", + "members":{ + "Url":{ + "shape":"DashDvbFontDownloadUrlString", + "documentation":"

        The URL for downloading fonts for subtitles.

        " + }, + "MimeType":{ + "shape":"DashDvbFontDownloadMimeTypeString", + "documentation":"

        The mimeType of the resource that's at the font download URL.

        For information about font MIME types, see the MPEG-DASH Profile for Transport of ISO BMFF Based DVB Services over IP Based Networks document.

        " + }, + "FontFamily":{ + "shape":"DashDvbFontDownloadFontFamilyString", + "documentation":"

        The fontFamily name for subtitles, as described in EBU-TT-D Subtitling Distribution Format.

        " + } + }, + "documentation":"

        For use with DVB-DASH profiles only. The settings for font downloads that you want Elemental MediaPackage to pass through to the manifest.

        " + }, + "DashDvbFontDownloadFontFamilyString":{ + "type":"string", + "max":256, + "min":1 + }, + "DashDvbFontDownloadMimeTypeString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_/-]*[a-zA-Z0-9]" + }, + "DashDvbFontDownloadUrlString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashDvbMetricsReporting":{ + "type":"structure", + "required":["ReportingUrl"], + "members":{ + "ReportingUrl":{ + "shape":"DashDvbMetricsReportingReportingUrlString", + "documentation":"

        The URL where playback devices send error reports.

        " + }, + "Probability":{ + "shape":"DashDvbMetricsReportingProbabilityInteger", + "documentation":"

        The number of playback devices per 1000 that will send error reports to the reporting URL. This represents the probability that a playback device will be a reporting player for this session.

        " + } + }, + "documentation":"

        For use with DVB-DASH profiles only. The settings for error reporting from the playback device that you want Elemental MediaPackage to pass through to the manifest.

        " + }, + "DashDvbMetricsReportingProbabilityInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "DashDvbMetricsReportingReportingUrlString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashDvbSettings":{ + "type":"structure", + "members":{ + "FontDownload":{ + "shape":"DashDvbFontDownload", + "documentation":"

        Subtitle font settings.

        " + }, + "ErrorMetrics":{ + "shape":"DashDvbErrorMetrics", + "documentation":"

        Playback device error reporting settings.

        " + } + }, + "documentation":"

        For endpoints that use the DVB-DASH profile only. The font download and error reporting information that you want MediaPackage to pass through to the manifest.

        " + }, "DashPeriodTrigger":{ "type":"string", "enum":[ @@ -1427,10 +1589,100 @@ "max":100, "min":0 }, + "DashProfile":{ + "type":"string", + "enum":["DVB_DASH"] + }, + "DashProfiles":{ + "type":"list", + "member":{"shape":"DashProfile"}, + "max":5, + "min":0 + }, + "DashProgramInformation":{ + "type":"structure", + "members":{ + "Title":{ + "shape":"DashProgramInformationTitleString", + "documentation":"

        The title for the manifest.

        " + }, + "Source":{ + "shape":"DashProgramInformationSourceString", + "documentation":"

        Information about the content provider.

        " + }, + "Copyright":{ + "shape":"DashProgramInformationCopyrightString", + "documentation":"

        A copyright statement about the content.

        " + }, + "LanguageCode":{ + "shape":"DashProgramInformationLanguageCodeString", + "documentation":"

        The language code for this manifest.

        " + }, + "MoreInformationUrl":{ + "shape":"DashProgramInformationMoreInformationUrlString", + "documentation":"

        An absolute URL that contains more information about this content.

        " + } + }, + "documentation":"

        Details about the content that you want MediaPackage to pass through in the manifest to the playback device.

        " + }, + "DashProgramInformationCopyrightString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashProgramInformationLanguageCodeString":{ + "type":"string", + "max":5, + "min":2, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]" + }, + "DashProgramInformationMoreInformationUrlString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashProgramInformationSourceString":{ + "type":"string", + "max":2048, + "min":1 + }, + "DashProgramInformationTitleString":{ + "type":"string", + "max":2048, + "min":1 + }, "DashSegmentTemplateFormat":{ "type":"string", "enum":["NUMBER_WITH_TIMELINE"] }, + "DashSubtitleConfiguration":{ + "type":"structure", + "members":{ + "TtmlConfiguration":{ + "shape":"DashTtmlConfiguration", + "documentation":"

        Settings for TTML subtitles.

        " + } + }, + "documentation":"

        The configuration for DASH subtitles.

        " + }, + "DashTtmlConfiguration":{ + "type":"structure", + "required":["TtmlProfile"], + "members":{ + "TtmlProfile":{ + "shape":"DashTtmlProfile", + "documentation":"

        The profile that MediaPackage uses when signaling subtitles in the manifest. IMSC is the default profile. EBU-TT-D produces subtitles that are compliant with the EBU-TT-D TTML profile. MediaPackage passes through subtitle styles to the manifest. For more information about EBU-TT-D subtitles, see EBU-TT-D Subtitling Distribution Format.

        " + } + }, + "documentation":"

        The settings for TTML subtitles.

        " + }, + "DashTtmlProfile":{ + "type":"string", + "enum":[ + "IMSC_1", + "EBU_TT_D_101" + ] + }, "DashUtcTiming":{ "type":"structure", "members":{ @@ -1988,6 +2240,30 @@ "UtcTiming":{ "shape":"DashUtcTiming", "documentation":"

        Determines the type of UTC timing included in the DASH Media Presentation Description (MPD).

        " + }, + "Profiles":{ + "shape":"DashProfiles", + "documentation":"

        The profile that the output is compliant with.

        " + }, + "BaseUrls":{ + "shape":"DashBaseUrls", + "documentation":"

        The base URL to use for retrieving segments.

        " + }, + "ProgramInformation":{ + "shape":"DashProgramInformation", + "documentation":"

        Details about the content that you want MediaPackage to pass through in the manifest to the playback device.

        " + }, + "DvbSettings":{ + "shape":"DashDvbSettings", + "documentation":"

        For endpoints that use the DVB-DASH profile only. The font download and error reporting information that you want MediaPackage to pass through to the manifest.

        " + }, + "Compactness":{ + "shape":"DashCompactness", + "documentation":"

        The layout of the DASH manifest that MediaPackage produces. STANDARD indicates a default manifest, which is compacted. NONE indicates a full manifest.

        " + }, + "SubtitleConfiguration":{ + "shape":"DashSubtitleConfiguration", + "documentation":"

        The configuration for DASH subtitles.

        " } }, "documentation":"

        Retrieve the DASH manifest configuration.

        " @@ -3855,7 +4131,11 @@ "CLIP_START_TIME_WITH_START_OR_END", "START_TAG_TIME_OFFSET_INVALID", "ONLY_CMAF_INPUT_TYPE_ALLOW_MQCS_INPUT_SWITCHING", - "ONLY_CMAF_INPUT_TYPE_ALLOW_MQCS_OUTPUT_CONFIGURATION" + "ONLY_CMAF_INPUT_TYPE_ALLOW_MQCS_OUTPUT_CONFIGURATION", + "INCOMPATIBLE_DASH_PROFILE_DVB_DASH_CONFIGURATION", + "DASH_DVB_ATTRIBUTES_WITHOUT_DVB_DASH_PROFILE", + "INCOMPATIBLE_DASH_COMPACTNESS_CONFIGURATION", + "INCOMPATIBLE_XML_ENCODING" ] } }, diff --git a/services/mediapackagevod/pom.xml b/services/mediapackagevod/pom.xml index 0a3ba59e6f58..bc7db09d2d15 100644 --- a/services/mediapackagevod/pom.xml +++ b/services/mediapackagevod/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mediapackagevod AWS Java SDK :: Services :: MediaPackage Vod diff --git a/services/mediapackagevod/src/main/resources/codegen-resources/customization.config b/services/mediapackagevod/src/main/resources/codegen-resources/customization.config index d8f0a1d4ff6d..3a8d27a41544 100644 --- a/services/mediapackagevod/src/main/resources/codegen-resources/customization.config +++ b/services/mediapackagevod/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "renameShapes": { // Do not keep adding to this list. Require the service team to name enums like they're naming their shapes. 
"__PeriodTriggersElement": "PeriodTriggersElement" - }, - "enableFastUnmarshaller": true + } } diff --git a/services/mediastore/pom.xml b/services/mediastore/pom.xml index e130fc070d11..107d625b6e8b 100644 --- a/services/mediastore/pom.xml +++ b/services/mediastore/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 mediastore diff --git a/services/mediastore/src/main/resources/codegen-resources/customization.config b/services/mediastore/src/main/resources/codegen-resources/customization.config index f6f3894942e5..1d165fcb810b 100644 --- a/services/mediastore/src/main/resources/codegen-resources/customization.config +++ b/services/mediastore/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "describeContainer" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediastoredata/pom.xml b/services/mediastoredata/pom.xml index 8c0e908addc0..b56f22e7d9fa 100644 --- a/services/mediastoredata/pom.xml +++ b/services/mediastoredata/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 mediastoredata diff --git a/services/mediastoredata/src/main/resources/codegen-resources/customization.config b/services/mediastoredata/src/main/resources/codegen-resources/customization.config index bc969d689a10..32a48c48174d 100644 --- a/services/mediastoredata/src/main/resources/codegen-resources/customization.config +++ b/services/mediastoredata/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "enableGenerateCompiledEndpointRules": true, "excludedSimpleMethods" : [ "listItems" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/mediatailor/pom.xml b/services/mediatailor/pom.xml index e4f950e74c8b..e0b28bff3b60 100644 --- a/services/mediatailor/pom.xml +++ b/services/mediatailor/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mediatailor AWS Java SDK :: Services :: MediaTailor diff --git a/services/mediatailor/src/main/resources/codegen-resources/customization.config b/services/mediatailor/src/main/resources/codegen-resources/customization.config index 5e82632a800f..df53e3257979 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/customization.config +++ b/services/mediatailor/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listPlaybackConfigurations" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mediatailor/src/main/resources/codegen-resources/service-2.json b/services/mediatailor/src/main/resources/codegen-resources/service-2.json index 7e24af915234..70fb48f34491 100644 --- a/services/mediatailor/src/main/resources/codegen-resources/service-2.json +++ b/services/mediatailor/src/main/resources/codegen-resources/service-2.json @@ -1626,8 +1626,7 @@ }, "DeleteChannelPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChannelRequest":{ "type":"structure", @@ -1643,8 +1642,7 @@ }, "DeleteChannelResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLiveSourceRequest":{ "type":"structure", @@ -1669,8 +1667,7 @@ }, "DeleteLiveSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePlaybackConfigurationRequest":{ "type":"structure", @@ -1686,8 +1683,7 @@ }, "DeletePlaybackConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePrefetchScheduleRequest":{ "type":"structure", @@ -1712,8 +1708,7 @@ }, "DeletePrefetchScheduleResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteProgramRequest":{ "type":"structure", @@ -1738,8 +1733,7 @@ }, "DeleteProgramResponse":{ "type":"structure", - "members":{ - } + "members":{} }, 
"DeleteSourceLocationRequest":{ "type":"structure", @@ -1755,8 +1749,7 @@ }, "DeleteSourceLocationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteVodSourceRequest":{ "type":"structure", @@ -1781,8 +1774,7 @@ }, "DeleteVodSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeChannelRequest":{ "type":"structure", @@ -2756,7 +2748,10 @@ }, "LogConfiguration":{ "type":"structure", - "required":["PercentEnabled"], + "required":[ + "PercentEnabled", + "EnabledLoggingStrategies" + ], "members":{ "PercentEnabled":{ "shape":"__integer", @@ -3110,8 +3105,7 @@ }, "PutChannelPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "PutPlaybackConfigurationRequest":{ "type":"structure", @@ -3683,8 +3677,7 @@ }, "StartChannelResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StopChannelRequest":{ "type":"structure", @@ -3700,8 +3693,7 @@ }, "StopChannelResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StreamingMediaFileConditioning":{ "type":"string", diff --git a/services/medicalimaging/pom.xml b/services/medicalimaging/pom.xml index 3a589bb9ba35..cbb9499b3462 100644 --- a/services/medicalimaging/pom.xml +++ b/services/medicalimaging/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT medicalimaging AWS Java SDK :: Services :: Medical Imaging diff --git a/services/medicalimaging/src/main/resources/codegen-resources/customization.config b/services/medicalimaging/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/medicalimaging/src/main/resources/codegen-resources/customization.config +++ b/services/medicalimaging/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git 
a/services/medicalimaging/src/main/resources/codegen-resources/service-2.json b/services/medicalimaging/src/main/resources/codegen-resources/service-2.json index 4f4168da52ef..4769b935ed7f 100644 --- a/services/medicalimaging/src/main/resources/codegen-resources/service-2.json +++ b/services/medicalimaging/src/main/resources/codegen-resources/service-2.json @@ -517,9 +517,15 @@ }, "force":{ "shape":"Boolean", - "documentation":"

        Setting this flag will force the CopyImageSet operation, even if Patient, Study, or Series level metadata are mismatched across the sourceImageSet and destinationImageSet.

        ", + "documentation":"

        Providing this parameter will force completion of the CopyImageSet operation, even if there are inconsistent Patient, Study, and/or Series level metadata elements between the sourceImageSet and destinationImageSet.

        ", "location":"querystring", "locationName":"force" + }, + "promoteToPrimary":{ + "shape":"Boolean", + "documentation":"

        Providing this parameter will configure the CopyImageSet operation to promote the given image set to the primary DICOM hierarchy. If successful, a new primary image set ID will be returned as the destination image set.

        ", + "location":"querystring", + "locationName":"promoteToPrimary" } }, "payload":"copyImageSetInformation" @@ -1210,7 +1216,7 @@ }, "contentType":{ "shape":"String", - "documentation":"

        The format in which the image frame information is returned to the customer. Default is application/octet-stream.

        ", + "documentation":"

        The format in which the image frame information is returned to the customer. Default is application/octet-stream.

        • If the stored transfer syntax is 1.2.840.10008.1.2.1, the returned contentType is application/octet-stream.

        • If the stored transfer syntax is 1.2.840.10008.1.2.4.50, the returned contentType is image/jpeg.

        • If the stored transfer syntax is 1.2.840.10008.1.2.4.91, the returned contentType is image/j2c.

        • If the stored transfer syntax is MPEG2, 1.2.840.10008.1.2.4.100, 1.2.840.10008.1.2.4.100.1, 1.2.840.10008.1.2.4.101, or 1.2.840.10008.1.2.4.101.1, the returned contentType is video/mpeg.

        • If the stored transfer syntax is MPEG-4 AVC/H.264, UID 1.2.840.10008.1.2.4.102, 1.2.840.10008.1.2.4.102.1, 1.2.840.10008.1.2.4.103, 1.2.840.10008.1.2.4.103.1, 1.2.840.10008.1.2.4.104, 1.2.840.10008.1.2.4.104.1, 1.2.840.10008.1.2.4.105, 1.2.840.10008.1.2.4.105.1, 1.2.840.10008.1.2.4.106, or 1.2.840.10008.1.2.4.106.1, the returned contentType is video/mp4.

        • If the stored transfer syntax is HEVC/H.265, UID 1.2.840.10008.1.2.4.107 or 1.2.840.10008.1.2.4.108, the returned contentType is video/H256.

        • If the stored transfer syntax is 1.2.840.10008.1.2.4.202 or if the stored transfer syntax is missing, the returned contentType is image/jph.

        • If the stored transfer syntax is 1.2.840.10008.1.2.4.203, the returned contentType is image/jphc.

        ", "location":"header", "locationName":"Content-Type" } @@ -1346,6 +1352,10 @@ "overrides":{ "shape":"Overrides", "documentation":"

        This object contains the details of any overrides used while creating a specific image set version. If an image set was copied or updated using the force flag, this object will contain the forced flag.

        " + }, + "isPrimary":{ + "shape":"Boolean", + "documentation":"

        The flag to determine whether the image set is primary or not.

        " } } }, @@ -1419,6 +1429,10 @@ "overrides":{ "shape":"Overrides", "documentation":"

        Contains details on overrides used when creating the returned version of an image set. For example, if forced exists, the forced flag was used when creating the image set.

        " + }, + "isPrimary":{ + "shape":"Boolean", + "documentation":"

        The flag to determine whether the image set is primary or not.

        " } }, "documentation":"

        The image set properties.

        " @@ -1477,6 +1491,10 @@ "DICOMTags":{ "shape":"DICOMTags", "documentation":"

        The DICOM tags associated with the image set.

        " + }, + "isPrimary":{ + "shape":"Boolean", + "documentation":"

        The flag to determine whether the image set is primary or not.

        " } }, "documentation":"

        Summary of the image set metadata.

        " @@ -1741,7 +1759,7 @@ "members":{ "forced":{ "shape":"Boolean", - "documentation":"

        Setting this flag will force the CopyImageSet and UpdateImageSetMetadata operations, even if Patient, Study, or Series level metadata are mismatched.

        " + "documentation":"

        Providing this parameter will force completion of the CopyImageSet and UpdateImageSetMetadata actions, even if metadata is inconsistent at the Patient, Study, and/or Series levels.

        " } }, "documentation":"

        Specifies the overrides used in image set modification calls to CopyImageSet and UpdateImageSetMetadata.

        " @@ -1809,6 +1827,10 @@ "DICOMStudyDateAndTime":{ "shape":"DICOMStudyDateAndTime", "documentation":"

        The aggregated structure containing DICOM study date and study time for search.

        " + }, + "isPrimary":{ + "shape":"Boolean", + "documentation":"

        The primary image set flag provided for search.

        " } }, "documentation":"

        The search input attribute value.

        ", @@ -2067,8 +2089,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2111,8 +2132,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateImageSetMetadataRequest":{ "type":"structure", @@ -2211,5 +2231,5 @@ "exception":true } }, - "documentation":"

        This is the AWS HealthImaging API Reference. AWS HealthImaging is a HIPAA eligible service that empowers healthcare providers, life science organizations, and their software partners to store, analyze, and share medical images in the cloud at petabyte scale. For an introduction to the service, see the AWS HealthImaging Developer Guide .

        We recommend using one of the AWS Software Development Kits (SDKs) for your programming language, as they take care of request authentication, serialization, and connection management. For more information, see Tools to build on AWS.

        The following sections list AWS HealthImaging API actions categorized according to functionality. Links are provided to actions within this Reference, along with links back to corresponding sections in the AWS HealthImaging Developer Guide where you can view tested code examples.

        Data store actions

        Import job actions

        Image set access actions

        Image set modification actions

        Tagging actions

        " + "documentation":"

        This is the AWS HealthImaging API Reference. For an introduction to the service, see What is AWS HealthImaging? in the AWS HealthImaging Developer Guide.

        " } diff --git a/services/memorydb/pom.xml b/services/memorydb/pom.xml index 3b23fdc3af03..fa01dcd0169d 100644 --- a/services/memorydb/pom.xml +++ b/services/memorydb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT memorydb AWS Java SDK :: Services :: Memory DB diff --git a/services/memorydb/src/main/resources/codegen-resources/customization.config b/services/memorydb/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/memorydb/src/main/resources/codegen-resources/customization.config +++ b/services/memorydb/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mgn/pom.xml b/services/mgn/pom.xml index e1e5c6daefae..37a08a256e79 100644 --- a/services/mgn/pom.xml +++ b/services/mgn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mgn AWS Java SDK :: Services :: Mgn diff --git a/services/mgn/src/main/resources/codegen-resources/customization.config b/services/mgn/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/mgn/src/main/resources/codegen-resources/customization.config +++ b/services/mgn/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/migrationhub/pom.xml b/services/migrationhub/pom.xml index e5cc61b73133..267eae837dbc 100644 --- a/services/migrationhub/pom.xml +++ b/services/migrationhub/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 migrationhub diff --git a/services/migrationhub/src/main/resources/codegen-resources/customization.config 
b/services/migrationhub/src/main/resources/codegen-resources/customization.config index 4797aaf0e76e..da22aaad9103 100644 --- a/services/migrationhub/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhub/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "*" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/migrationhubconfig/pom.xml b/services/migrationhubconfig/pom.xml index 2c7682bd38aa..8e969df1e6e2 100644 --- a/services/migrationhubconfig/pom.xml +++ b/services/migrationhubconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT migrationhubconfig AWS Java SDK :: Services :: MigrationHub Config diff --git a/services/migrationhubconfig/src/main/resources/codegen-resources/customization.config b/services/migrationhubconfig/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/migrationhubconfig/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhubconfig/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/migrationhuborchestrator/pom.xml b/services/migrationhuborchestrator/pom.xml index d6ea26c9723c..37de33ed403e 100644 --- a/services/migrationhuborchestrator/pom.xml +++ b/services/migrationhuborchestrator/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT migrationhuborchestrator AWS Java SDK :: Services :: Migration Hub Orchestrator diff --git a/services/migrationhuborchestrator/src/main/resources/codegen-resources/customization.config b/services/migrationhuborchestrator/src/main/resources/codegen-resources/customization.config 
index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/migrationhuborchestrator/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhuborchestrator/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/migrationhubrefactorspaces/pom.xml b/services/migrationhubrefactorspaces/pom.xml index e4735f6749f0..9e775ad4b871 100644 --- a/services/migrationhubrefactorspaces/pom.xml +++ b/services/migrationhubrefactorspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT migrationhubrefactorspaces AWS Java SDK :: Services :: Migration Hub Refactor Spaces diff --git a/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/customization.config b/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhubrefactorspaces/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/migrationhubstrategy/pom.xml b/services/migrationhubstrategy/pom.xml index b7d27e171b1b..d1e2538760fc 100644 --- a/services/migrationhubstrategy/pom.xml +++ b/services/migrationhubstrategy/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT migrationhubstrategy AWS Java SDK :: Services :: Migration Hub Strategy diff --git a/services/migrationhubstrategy/src/main/resources/codegen-resources/customization.config b/services/migrationhubstrategy/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 
--- a/services/migrationhubstrategy/src/main/resources/codegen-resources/customization.config +++ b/services/migrationhubstrategy/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mpa/pom.xml b/services/mpa/pom.xml new file mode 100644 index 000000000000..c7b9e4152e37 --- /dev/null +++ b/services/mpa/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.76-SNAPSHOT + + mpa + AWS Java SDK :: Services :: MPA + The AWS Java SDK for MPA module holds the client classes that are used for + communicating with MPA. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.mpa + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/mpa/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/mpa/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..7af7d5fc8bd9 --- /dev/null +++ b/services/mpa/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,137 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://mpa-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://mpa.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/mpa/src/main/resources/codegen-resources/endpoint-tests.json 
b/services/mpa/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..afb955e9722d --- /dev/null +++ b/services/mpa/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,201 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { 
+ "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": 
"us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mpa.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/mpa/src/main/resources/codegen-resources/paginators-1.json b/services/mpa/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..f3f8b1b29879 --- /dev/null +++ b/services/mpa/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,40 @@ +{ + "pagination": { + "ListApprovalTeams": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ApprovalTeams" + }, + "ListIdentitySources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "IdentitySources" + }, + "ListPolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Policies" + }, + "ListPolicyVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PolicyVersions" + }, + "ListResourcePolicies": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ResourcePolicies" + }, + "ListSessions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Sessions" + } + } +} diff --git a/services/mpa/src/main/resources/codegen-resources/service-2.json b/services/mpa/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..8698e2f6796c --- /dev/null +++ b/services/mpa/src/main/resources/codegen-resources/service-2.json 
@@ -0,0 +1,2293 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-07-26", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"mpa", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS Multi-party Approval", + "serviceId":"MPA", + "signatureVersion":"v4", + "signingName":"mpa", + "uid":"mpa-2022-07-26" + }, + "operations":{ + "CancelSession":{ + "name":"CancelSession", + "http":{ + "method":"PUT", + "requestUri":"/sessions/{SessionArn}", + "responseCode":200 + }, + "input":{"shape":"CancelSessionRequest"}, + "output":{"shape":"CancelSessionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Cancels an approval session. For more information, see Session in the Multi-party approval User Guide.

        ", + "idempotent":true + }, + "CreateApprovalTeam":{ + "name":"CreateApprovalTeam", + "http":{ + "method":"POST", + "requestUri":"/approval-teams", + "responseCode":200 + }, + "input":{"shape":"CreateApprovalTeamRequest"}, + "output":{"shape":"CreateApprovalTeamResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Creates a new approval team. For more information, see Approval team in the Multi-party approval User Guide.

        ", + "idempotent":true + }, + "CreateIdentitySource":{ + "name":"CreateIdentitySource", + "http":{ + "method":"POST", + "requestUri":"/identity-sources", + "responseCode":200 + }, + "input":{"shape":"CreateIdentitySourceRequest"}, + "output":{"shape":"CreateIdentitySourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates a new identity source. For more information, see Identity Source in the Multi-party approval User Guide.

        ", + "idempotent":true + }, + "DeleteIdentitySource":{ + "name":"DeleteIdentitySource", + "http":{ + "method":"DELETE", + "requestUri":"/identity-sources/{IdentitySourceArn}", + "responseCode":200 + }, + "input":{"shape":"DeleteIdentitySourceRequest"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes an identity source. For more information, see Identity Source in the Multi-party approval User Guide.

        ", + "idempotent":true + }, + "DeleteInactiveApprovalTeamVersion":{ + "name":"DeleteInactiveApprovalTeamVersion", + "http":{ + "method":"DELETE", + "requestUri":"/approval-teams/{Arn}/{VersionId}", + "responseCode":200 + }, + "input":{"shape":"DeleteInactiveApprovalTeamVersionRequest"}, + "output":{"shape":"DeleteInactiveApprovalTeamVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes an inactive approval team. For more information, see Team health in the Multi-party approval User Guide.

        You can also use this operation to delete a team draft. For more information, see Interacting with drafts in the Multi-party approval User Guide.

        ", + "idempotent":true + }, + "GetApprovalTeam":{ + "name":"GetApprovalTeam", + "http":{ + "method":"GET", + "requestUri":"/approval-teams/{Arn}", + "responseCode":200 + }, + "input":{"shape":"GetApprovalTeamRequest"}, + "output":{"shape":"GetApprovalTeamResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns details for an approval team.

        " + }, + "GetIdentitySource":{ + "name":"GetIdentitySource", + "http":{ + "method":"GET", + "requestUri":"/identity-sources/{IdentitySourceArn}", + "responseCode":200 + }, + "input":{"shape":"GetIdentitySourceRequest"}, + "output":{"shape":"GetIdentitySourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns details for an identity source. For more information, see Identity Source in the Multi-party approval User Guide.

        " + }, + "GetPolicyVersion":{ + "name":"GetPolicyVersion", + "http":{ + "method":"GET", + "requestUri":"/policy-versions/{PolicyVersionArn}", + "responseCode":200 + }, + "input":{"shape":"GetPolicyVersionRequest"}, + "output":{"shape":"GetPolicyVersionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns details for the version of a policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"POST", + "requestUri":"/GetResourcePolicy", + "responseCode":200 + }, + "input":{"shape":"GetResourcePolicyRequest"}, + "output":{"shape":"GetResourcePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns details about a policy for a resource.

        " + }, + "GetSession":{ + "name":"GetSession", + "http":{ + "method":"GET", + "requestUri":"/sessions/{SessionArn}", + "responseCode":200 + }, + "input":{"shape":"GetSessionRequest"}, + "output":{"shape":"GetSessionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns details for an approval session. For more information, see Session in the Multi-party approval User Guide.

        " + }, + "ListApprovalTeams":{ + "name":"ListApprovalTeams", + "http":{ + "method":"POST", + "requestUri":"/approval-teams/?List", + "responseCode":200 + }, + "input":{"shape":"ListApprovalTeamsRequest"}, + "output":{"shape":"ListApprovalTeamsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of approval teams.

        " + }, + "ListIdentitySources":{ + "name":"ListIdentitySources", + "http":{ + "method":"POST", + "requestUri":"/identity-sources/?List", + "responseCode":200 + }, + "input":{"shape":"ListIdentitySourcesRequest"}, + "output":{"shape":"ListIdentitySourcesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of identity sources. For more information, see Identity Source in the Multi-party approval User Guide.

        " + }, + "ListPolicies":{ + "name":"ListPolicies", + "http":{ + "method":"POST", + "requestUri":"/policies/?List", + "responseCode":200 + }, + "input":{"shape":"ListPoliciesRequest"}, + "output":{"shape":"ListPoliciesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of policies. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "ListPolicyVersions":{ + "name":"ListPolicyVersions", + "http":{ + "method":"POST", + "requestUri":"/policies/{PolicyArn}/?List", + "responseCode":200 + }, + "input":{"shape":"ListPolicyVersionsRequest"}, + "output":{"shape":"ListPolicyVersionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of the versions for policies. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "ListResourcePolicies":{ + "name":"ListResourcePolicies", + "http":{ + "method":"POST", + "requestUri":"/resource-policies/{ResourceArn}/?List", + "responseCode":200 + }, + "input":{"shape":"ListResourcePoliciesRequest"}, + "output":{"shape":"ListResourcePoliciesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of policies for a resource.

        " + }, + "ListSessions":{ + "name":"ListSessions", + "http":{ + "method":"POST", + "requestUri":"/approval-teams/{ApprovalTeamArn}/sessions/?List", + "responseCode":200 + }, + "input":{"shape":"ListSessionsRequest"}, + "output":{"shape":"ListSessionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of approval sessions. For more information, see Session in the Multi-party approval User Guide.

        " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns a list of the tags for a resource.

        " + }, + "StartActiveApprovalTeamDeletion":{ + "name":"StartActiveApprovalTeamDeletion", + "http":{ + "method":"POST", + "requestUri":"/approval-teams/{Arn}?Delete", + "responseCode":200 + }, + "input":{"shape":"StartActiveApprovalTeamDeletionRequest"}, + "output":{"shape":"StartActiveApprovalTeamDeletionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Starts the deletion process for an active approval team.

        Deletions require team approval

        Requests to delete an active team must be approved by the team.

        ", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"PUT", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"TooManyTagsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates or updates a resource tag. Each tag is a label consisting of a user-defined key and value. Tags can help you manage, identify, organize, search for, and filter resources.

        ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Removes a resource tag. Each tag is a label consisting of a user-defined key and value. Tags can help you manage, identify, organize, search for, and filter resources.

        ", + "idempotent":true + }, + "UpdateApprovalTeam":{ + "name":"UpdateApprovalTeam", + "http":{ + "method":"PATCH", + "requestUri":"/approval-teams/{Arn}", + "responseCode":200 + }, + "input":{"shape":"UpdateApprovalTeamRequest"}, + "output":{"shape":"UpdateApprovalTeamResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Updates an approval team. You can request to update the team description, approval threshold, and approvers in the team.

        Updates require team approval

        Updates to an active team must be approved by the team.

        ", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the AccessDeniedException error.

        " + } + }, + "documentation":"

        You do not have sufficient access to perform this action. Check your permissions, and try again.

        ", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountId":{ + "type":"string", + "max":12, + "min":0, + "pattern":"\\d{12}" + }, + "ActionCompletionStrategy":{ + "type":"string", + "enum":["AUTO_COMPLETION_UPON_APPROVAL"] + }, + "ActionName":{ + "type":"string", + "max":500, + "min":0 + }, + "ApprovalStrategy":{ + "type":"structure", + "members":{ + "MofN":{ + "shape":"MofNApprovalStrategy", + "documentation":"

        Minimum number of approvals (M) required for a total number of approvers (N).

        " + } + }, + "documentation":"

        Strategy for how an approval team grants approval.

        ", + "union":true + }, + "ApprovalStrategyResponse":{ + "type":"structure", + "members":{ + "MofN":{ + "shape":"MofNApprovalStrategy", + "documentation":"

        Minimum number of approvals (M) required for a total number of approvers (N).

        " + } + }, + "documentation":"

        Contains details for how an approval team grants approval.

        ", + "union":true + }, + "ApprovalTeamArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:mpa:[a-z0-9-]{1,20}:[0-9]{12}:approval-team/[a-zA-Z0-9._-]+" + }, + "ApprovalTeamName":{ + "type":"string", + "max":64, + "min":0, + "pattern":"[a-zA-Z0-9._-]+" + }, + "ApprovalTeamRequestApprover":{ + "type":"structure", + "required":[ + "PrimaryIdentityId", + "PrimaryIdentitySourceArn" + ], + "members":{ + "PrimaryIdentityId":{ + "shape":"IdentityId", + "documentation":"

        ID for the user.

        " + }, + "PrimaryIdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source. The identity source manages the user authentication for approvers.

        " + } + }, + "documentation":"

        Contains details for an approver.

        " + }, + "ApprovalTeamRequestApprovers":{ + "type":"list", + "member":{"shape":"ApprovalTeamRequestApprover"}, + "max":20, + "min":1 + }, + "ApprovalTeamStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "INACTIVE", + "DELETING", + "PENDING" + ] + }, + "ApprovalTeamStatusCode":{ + "type":"string", + "enum":[ + "VALIDATING", + "PENDING_ACTIVATION", + "FAILED_VALIDATION", + "FAILED_ACTIVATION", + "UPDATE_PENDING_APPROVAL", + "UPDATE_PENDING_ACTIVATION", + "UPDATE_FAILED_APPROVAL", + "UPDATE_FAILED_ACTIVATION", + "UPDATE_FAILED_VALIDATION", + "DELETE_PENDING_APPROVAL", + "DELETE_FAILED_APPROVAL", + "DELETE_FAILED_VALIDATION" + ] + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CancelSessionRequest":{ + "type":"structure", + "required":["SessionArn"], + "members":{ + "SessionArn":{ + "shape":"SessionArn", + "documentation":"

        Amazon Resource Name (ARN) for the session.

        ", + "location":"uri", + "locationName":"SessionArn" + } + } + }, + "CancelSessionResponse":{ + "type":"structure", + "members":{ + } + }, + "ConflictException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the ConflictException error.

        " + } + }, + "documentation":"

        The request cannot be completed because it conflicts with the current state of a resource.

        ", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateApprovalTeamRequest":{ + "type":"structure", + "required":[ + "ApprovalStrategy", + "Approvers", + "Description", + "Policies", + "Name" + ], + "members":{ + "ClientToken":{ + "shape":"Token", + "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, Amazon Web Services populates this field.

        What is idempotency?

        When you make a mutating API request, the request typically returns a result before the operation's asynchronous workflows have completed. Operations might also time out or encounter other server issues before they complete, even though the request has already returned a result. This could make it difficult to determine whether the request succeeded or not, and could lead to multiple retries to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation is completed multiple times. This means that you might create more resources than you intended.

        Idempotency ensures that an API request completes no more than one time. With an idempotent request, if the original request completes successfully, any subsequent retries complete successfully without performing any further actions.

        ", + "idempotencyToken":true + }, + "ApprovalStrategy":{ + "shape":"ApprovalStrategy", + "documentation":"

        An ApprovalStrategy object. Contains details for how the team grants approval.

        " + }, + "Approvers":{ + "shape":"ApprovalTeamRequestApprovers", + "documentation":"

        An array of ApprovalTeamRequestApprover objects. Contains details for the approvers in the team.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the team.

        " + }, + "Policies":{ + "shape":"PoliciesReferences", + "documentation":"

        An array of PolicyReference objects. Contains a list of policies that define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "Name":{ + "shape":"ApprovalTeamName", + "documentation":"

        Name of the team.

        " + }, + "Tags":{ + "shape":"Tags", + "documentation":"

        Tags you want to attach to the team.

        " + } + } + }, + "CreateApprovalTeamResponse":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the team was created.

        " + }, + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team that was created.

        " + }, + "Name":{ + "shape":"String", + "documentation":"

        Name of the team that was created.

        " + }, + "VersionId":{ + "shape":"String", + "documentation":"

        Version ID for the team that was created. When a team is updated, the version ID changes.

        " + } + } + }, + "CreateIdentitySourceRequest":{ + "type":"structure", + "required":["IdentitySourceParameters"], + "members":{ + "IdentitySourceParameters":{ + "shape":"IdentitySourceParameters", + "documentation":"

        An IdentitySourceParameters object. Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "ClientToken":{ + "shape":"Token", + "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, Amazon Web Services populates this field.

        What is idempotency?

        When you make a mutating API request, the request typically returns a result before the operation's asynchronous workflows have completed. Operations might also time out or encounter other server issues before they complete, even though the request has already returned a result. This could make it difficult to determine whether the request succeeded or not, and could lead to multiple retries to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation is completed multiple times. This means that you might create more resources than you intended.

        Idempotency ensures that an API request completes no more than one time. With an idempotent request, if the original request completes successfully, any subsequent retries complete successfully without performing any further actions.

        ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"Tags", + "documentation":"

        Tag you want to attach to the identity source.

        " + } + } + }, + "CreateIdentitySourceResponse":{ + "type":"structure", + "members":{ + "IdentitySourceType":{ + "shape":"IdentitySourceType", + "documentation":"

        The type of resource that provided identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source that was created.

        " + }, + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the identity source was created.

        " + } + } + }, + "DeleteIdentitySourceRequest":{ + "type":"structure", + "required":["IdentitySourceArn"], + "members":{ + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source.

        ", + "location":"uri", + "locationName":"IdentitySourceArn" + } + } + }, + "DeleteInactiveApprovalTeamVersionRequest":{ + "type":"structure", + "required":[ + "Arn", + "VersionId" + ], + "members":{ + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        ", + "location":"uri", + "locationName":"Arn" + }, + "VersionId":{ + "shape":"String", + "documentation":"

        Version ID for the team.

        ", + "location":"uri", + "locationName":"VersionId" + } + } + }, + "DeleteInactiveApprovalTeamVersionResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":256, + "min":1, + "sensitive":true + }, + "Filter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"FilterField", + "documentation":"

        Name of the filter to use.

        Supported filters

        The supported filters for ListSessions are: ActionName, SessionStatus, and InitiationTime.

        " + }, + "Operator":{ + "shape":"Operator", + "documentation":"

        Operator to use for filtering.

        • EQ: Equal to the specified value

        • NE: Not equal to the specified value

        • GT: Greater than the specified value

        • LT: Less than the specified value

        • GTE: Greater than or equal to the specified value

        • LTE: Less than or equal to the specified value

        • CONTAINS: Contains the specified value

        • NOT_CONTAINS: Does not contain the specified value

        • BETWEEN: Between two values, inclusive of the specified values.

        Supported operators for each filter:

        • ActionName: EQ | NE | CONTAINS | NOT_CONTAINS

        • SessionStatus: EQ | NE

        • InitiationTime: GT | LT | GTE | LTE | BETWEEN

        " + }, + "Value":{ + "shape":"String", + "documentation":"

        Value to use for filtering. For the BETWEEN operator, specify values in the format a AND b (AND is case-insensitive).

        " + } + }, + "documentation":"

        Contains the filter to apply to requests. You can specify up to 10 filters for a request.

        " + }, + "FilterField":{ + "type":"string", + "enum":[ + "ActionName", + "ApprovalTeamName", + "VotingTime", + "Vote", + "SessionStatus", + "InitiationTime" + ] + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":10, + "min":0 + }, + "GetApprovalTeamRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        ", + "location":"uri", + "locationName":"Arn" + } + } + }, + "GetApprovalTeamResponse":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the team was created.

        " + }, + "ApprovalStrategy":{ + "shape":"ApprovalStrategyResponse", + "documentation":"

        An ApprovalStrategyResponse object. Contains details for how the team grants approval.

        " + }, + "NumberOfApprovers":{ + "shape":"Integer", + "documentation":"

        Total number of approvers in the team.

        " + }, + "Approvers":{ + "shape":"GetApprovalTeamResponseApprovers", + "documentation":"

        An array of GetApprovalTeamResponseApprover objects. Contains details for the approvers in the team.

        " + }, + "Arn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the team.

        " + }, + "Name":{ + "shape":"String", + "documentation":"

        Name of the approval team.

        " + }, + "Status":{ + "shape":"ApprovalTeamStatus", + "documentation":"

        Status for the team. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusCode":{ + "shape":"ApprovalTeamStatusCode", + "documentation":"

        Status code for the approval team. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

        Message describing the status for the team.

        " + }, + "UpdateSessionArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the session.

        " + }, + "VersionId":{ + "shape":"String", + "documentation":"

        Version ID for the team.

        " + }, + "Policies":{ + "shape":"PoliciesReferences", + "documentation":"

        An array of PolicyReference objects. Contains a list of policies that define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "LastUpdateTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the team was last updated.

        " + }, + "PendingUpdate":{ + "shape":"PendingUpdate", + "documentation":"

        A PendingUpdate object. Contains details for the pending updates for the team, if applicable.

        " + } + } + }, + "GetApprovalTeamResponseApprover":{ + "type":"structure", + "members":{ + "ApproverId":{ + "shape":"ParticipantId", + "documentation":"

        ID for the approver.

        " + }, + "ResponseTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the approver responded to an approval team invitation.

        " + }, + "PrimaryIdentityId":{ + "shape":"IdentityId", + "documentation":"

        ID for the user.

        " + }, + "PrimaryIdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source. The identity source manages the user authentication for approvers.

        " + }, + "PrimaryIdentityStatus":{ + "shape":"IdentityStatus", + "documentation":"

        Status for the identity source. For example, if an approver has accepted a team invitation with a user authentication method managed by the identity source.

        " + } + }, + "documentation":"

        Contains details for an approver.

        " + }, + "GetApprovalTeamResponseApprovers":{ + "type":"list", + "member":{"shape":"GetApprovalTeamResponseApprover"}, + "max":20, + "min":0 + }, + "GetIdentitySourceRequest":{ + "type":"structure", + "required":["IdentitySourceArn"], + "members":{ + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source.

        ", + "location":"uri", + "locationName":"IdentitySourceArn" + } + } + }, + "GetIdentitySourceResponse":{ + "type":"structure", + "members":{ + "IdentitySourceType":{ + "shape":"IdentitySourceType", + "documentation":"

        The type of resource that provided identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceParameters":{ + "shape":"IdentitySourceParametersForGet", + "documentation":"

        An IdentitySourceParameters object. Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source.

        " + }, + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the identity source was created.

        " + }, + "Status":{ + "shape":"IdentitySourceStatus", + "documentation":"

        Status for the identity source. For example, if the identity source is ACTIVE.

        " + }, + "StatusCode":{ + "shape":"IdentitySourceStatusCode", + "documentation":"

        Status code of the identity source.

        " + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

        Message describing the status for the identity source.

        " + } + } + }, + "GetPolicyVersionRequest":{ + "type":"structure", + "required":["PolicyVersionArn"], + "members":{ + "PolicyVersionArn":{ + "shape":"QualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        ", + "location":"uri", + "locationName":"PolicyVersionArn" + } + } + }, + "GetPolicyVersionResponse":{ + "type":"structure", + "required":["PolicyVersion"], + "members":{ + "PolicyVersion":{ + "shape":"PolicyVersion", + "documentation":"

        A PolicyVersion object. Contains details for the version of the policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + } + } + }, + "GetResourcePolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "PolicyName", + "PolicyType" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource.

        " + }, + "PolicyName":{ + "shape":"String", + "documentation":"

        Name of the policy.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + } + } + }, + "GetResourcePolicyResponse":{ + "type":"structure", + "required":[ + "ResourceArn", + "PolicyType", + "PolicyName", + "PolicyDocument" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + }, + "PolicyVersionArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the policy version.

        " + }, + "PolicyName":{ + "shape":"PolicyName", + "documentation":"

        Name of the policy.

        " + }, + "PolicyDocument":{ + "shape":"PolicyDocument", + "documentation":"

        Document that contains the contents for the policy.

        " + } + } + }, + "GetSessionRequest":{ + "type":"structure", + "required":["SessionArn"], + "members":{ + "SessionArn":{ + "shape":"SessionArn", + "documentation":"

        Amazon Resource Name (ARN) for the session.

        ", + "location":"uri", + "locationName":"SessionArn" + } + } + }, + "GetSessionResponse":{ + "type":"structure", + "members":{ + "SessionArn":{ + "shape":"SessionArn", + "documentation":"

        Amazon Resource Name (ARN) for the session.

        " + }, + "ApprovalTeamArn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the approval team.

        " + }, + "ApprovalTeamName":{ + "shape":"ApprovalTeamName", + "documentation":"

        Name of the approval team.

        " + }, + "ProtectedResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the protected operation.

        " + }, + "ApprovalStrategy":{ + "shape":"ApprovalStrategyResponse", + "documentation":"

        An ApprovalStrategyResponse object. Contains details for how the team grants approval.

        " + }, + "NumberOfApprovers":{ + "shape":"Integer", + "documentation":"

        Total number of approvers in the session.

        " + }, + "InitiationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session was initiated.

        " + }, + "ExpirationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session will expire.

        " + }, + "CompletionTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session completed.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the session.

        " + }, + "Metadata":{ + "shape":"SessionMetadata", + "documentation":"

        Metadata for the session.

        " + }, + "Status":{ + "shape":"SessionStatus", + "documentation":"

        Status for the session. For example, if the team has approved the requested operation.

        " + }, + "StatusCode":{ + "shape":"SessionStatusCode", + "documentation":"

        Status code of the session.

        " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

        Message describing the status for session.

        " + }, + "ExecutionStatus":{ + "shape":"SessionExecutionStatus", + "documentation":"

        Status for the protected operation. For example, if the operation is PENDING.

        " + }, + "ActionName":{ + "shape":"ActionName", + "documentation":"

        Name of the protected operation.

        " + }, + "RequesterServicePrincipal":{ + "shape":"ServicePrincipal", + "documentation":"

        Service principal for the service associated with the protected operation.

        " + }, + "RequesterPrincipalArn":{ + "shape":"String", + "documentation":"

        IAM principal that made the operation request.

        " + }, + "RequesterAccountId":{ + "shape":"AccountId", + "documentation":"

        ID for the account that made the operation request.

        " + }, + "RequesterRegion":{ + "shape":"Region", + "documentation":"

        Amazon Web Services Region where the operation request originated.

        " + }, + "RequesterComment":{ + "shape":"RequesterComment", + "documentation":"

        Message from the account that made the operation request

        " + }, + "ActionCompletionStrategy":{ + "shape":"ActionCompletionStrategy", + "documentation":"

        Strategy for executing the protected operation. AUTO_COMPLETION_UPON_APPROVAL means the operation is automatically executed using the requester's permissions, if approved.

        " + }, + "ApproverResponses":{ + "shape":"GetSessionResponseApproverResponses", + "documentation":"

        An array of GetSessionResponseApproverResponse objects. Contains details for approver responses in the session.

        " + } + } + }, + "GetSessionResponseApproverResponse":{ + "type":"structure", + "members":{ + "ApproverId":{ + "shape":"ParticipantId", + "documentation":"

        ID for the approver.

        " + }, + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source. The identity source manages the user authentication for approvers.

        " + }, + "IdentityId":{ + "shape":"IdentityId", + "documentation":"

        ID for the identity source. The identity source manages the user authentication for approvers.

        " + }, + "Response":{ + "shape":"SessionResponse", + "documentation":"

        Response to the operation request.

        " + }, + "ResponseTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when an approver responded to the operation request.

        " + } + }, + "documentation":"

        Contains details for an approver response in an approval session.

        " + }, + "GetSessionResponseApproverResponses":{ + "type":"list", + "member":{"shape":"GetSessionResponseApproverResponse"}, + "max":20, + "min":0 + }, + "IamIdentityCenter":{ + "type":"structure", + "required":[ + "InstanceArn", + "Region" + ], + "members":{ + "InstanceArn":{ + "shape":"IdcInstanceArn", + "documentation":"

        Amazon Resource Name (ARN) for the IAM Identity Center instance.

        " + }, + "Region":{ + "shape":"String", + "documentation":"

        Amazon Web Services Region where the IAM Identity Center instance is located.

        " + } + }, + "documentation":"

        IAM Identity Center credentials. For more information, see IAM Identity Center.

        " + }, + "IamIdentityCenterForGet":{ + "type":"structure", + "members":{ + "InstanceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the IAM Identity Center instance.

        " + }, + "ApprovalPortalUrl":{ + "shape":"String", + "documentation":"

        URL for the approval portal associated with the IAM Identity Center instance.

        " + }, + "Region":{ + "shape":"String", + "documentation":"

        Amazon Web Services Region where the IAM Identity Center instance is located.

        " + } + }, + "documentation":"

        IAM Identity Center credentials. For more information, see IAM Identity Center.

        " + }, + "IamIdentityCenterForList":{ + "type":"structure", + "members":{ + "InstanceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the IAM Identity Center instance.

        " + }, + "ApprovalPortalUrl":{ + "shape":"String", + "documentation":"

        URL for the approval portal associated with the IAM Identity Center instance.

        " + }, + "Region":{ + "shape":"String", + "documentation":"

        Amazon Web Services Region where the IAM Identity Center instance is located.

        " + } + }, + "documentation":"

        IAM Identity Center credentials. For more information, see IAM Identity Center.

        " + }, + "IdcInstanceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.+:sso:::instance/(?:sso)?ins-[a-zA-Z0-9-.]{16}" + }, + "IdentityId":{ + "type":"string", + "max":100, + "min":1 + }, + "IdentitySourceForList":{ + "type":"structure", + "members":{ + "IdentitySourceType":{ + "shape":"IdentitySourceType", + "documentation":"

        The type of resource that provided identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceParameters":{ + "shape":"IdentitySourceParametersForList", + "documentation":"

        An IdentitySourceParametersForList object. Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the identity source.

        " + }, + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the identity source was created.

        " + }, + "Status":{ + "shape":"IdentitySourceStatus", + "documentation":"

        Status for the identity source. For example, if the identity source is ACTIVE.

        " + }, + "StatusCode":{ + "shape":"IdentitySourceStatusCode", + "documentation":"

        Status code of the identity source.

        " + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

        Message describing the status for the identity source.

        " + } + }, + "documentation":"

        Contains details for an identity source. For more information, see Identity source in the Multi-party approval User Guide.

        " + }, + "IdentitySourceParameters":{ + "type":"structure", + "members":{ + "IamIdentityCenter":{ + "shape":"IamIdentityCenter", + "documentation":"

        IAM Identity Center credentials.

        " + } + }, + "documentation":"

        Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance.

        " + }, + "IdentitySourceParametersForGet":{ + "type":"structure", + "members":{ + "IamIdentityCenter":{ + "shape":"IamIdentityCenterForGet", + "documentation":"

        IAM Identity Center credentials.

        " + } + }, + "documentation":"

        Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance. For more information, see Identity source in the Multi-party approval User Guide.

        ", + "union":true + }, + "IdentitySourceParametersForList":{ + "type":"structure", + "members":{ + "IamIdentityCenter":{ + "shape":"IamIdentityCenterForList", + "documentation":"

        IAM Identity Center credentials.

        " + } + }, + "documentation":"

        Contains details for the resource that provides identities to the identity source. For example, an IAM Identity Center instance. For more information, see Identity source in the Multi-party approval User Guide.

        ", + "union":true + }, + "IdentitySourceStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "DELETING", + "ERROR" + ] + }, + "IdentitySourceStatusCode":{ + "type":"string", + "enum":[ + "ACCESS_DENIED", + "DELETION_FAILED", + "IDC_INSTANCE_NOT_FOUND", + "IDC_INSTANCE_NOT_VALID" + ] + }, + "IdentitySourceType":{ + "type":"string", + "enum":["IAM_IDENTITY_CENTER"] + }, + "IdentitySources":{ + "type":"list", + "member":{"shape":"IdentitySourceForList"}, + "max":20, + "min":0 + }, + "IdentityStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACCEPTED", + "REJECTED", + "INVALID" + ] + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the InternalServerException error.

        " + } + }, + "documentation":"

        The service encountered an internal error. Try your request again. If the problem persists, contact Amazon Web Services Support.

        ", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "InvalidParameterException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the InvalidParameterException error.

        " + } + }, + "documentation":"

        The request contains an invalid parameter value.

        ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "IsoTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "ListApprovalTeamsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListApprovalTeamsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "ApprovalTeams":{ + "shape":"ListApprovalTeamsResponseApprovalTeams", + "documentation":"

        An array of ListApprovalTeamsResponseApprovalTeam objects. Contains details for approval teams.

        " + } + } + }, + "ListApprovalTeamsResponseApprovalTeam":{ + "type":"structure", + "members":{ + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the team was created.

        " + }, + "ApprovalStrategy":{ + "shape":"ApprovalStrategyResponse", + "documentation":"

        An ApprovalStrategyResponse object. Contains details for how an approval team grants approval.

        " + }, + "NumberOfApprovers":{ + "shape":"Integer", + "documentation":"

        Total number of approvers in the team.

        " + }, + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        " + }, + "Name":{ + "shape":"ApprovalTeamName", + "documentation":"

        Name of the team.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the team.

        " + }, + "Status":{ + "shape":"ApprovalTeamStatus", + "documentation":"

        Status for the team. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusCode":{ + "shape":"ApprovalTeamStatusCode", + "documentation":"

        Status code for the team. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

        Message describing the status for the team.

        " + } + }, + "documentation":"

        Contains details for an approval team.

        " + }, + "ListApprovalTeamsResponseApprovalTeams":{ + "type":"list", + "member":{"shape":"ListApprovalTeamsResponseApprovalTeam"}, + "max":20, + "min":0 + }, + "ListIdentitySourcesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListIdentitySourcesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "IdentitySources":{ + "shape":"IdentitySources", + "documentation":"

        An IdentitySources object. Contains details for identity sources.

        " + } + } + }, + "ListPoliciesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListPoliciesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "Policies":{ + "shape":"Policies", + "documentation":"

        An array of Policy objects. Contains a list of policies that define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + } + } + }, + "ListPolicyVersionsRequest":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "PolicyArn":{ + "shape":"UnqualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        ", + "location":"uri", + "locationName":"PolicyArn" + } + } + }, + "ListPolicyVersionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "PolicyVersions":{ + "shape":"PolicyVersions", + "documentation":"

        An array of PolicyVersionSummary objects. Contains details for the version of the policies that define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + } + } + }, + "ListResourcePoliciesRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource.

        ", + "location":"uri", + "locationName":"ResourceArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        ", + "location":"querystring", + "locationName":"NextToken" + } + } + }, + "ListResourcePoliciesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "ResourcePolicies":{ + "shape":"ListResourcePoliciesResponseResourcePolicies", + "documentation":"

        An array of ListResourcePoliciesResponseResourcePolicy objects. Contains details about the policy for the resource.

        " + } + } + }, + "ListResourcePoliciesResponseResourcePolicies":{ + "type":"list", + "member":{"shape":"ListResourcePoliciesResponseResourcePolicy"}, + "max":100, + "min":0 + }, + "ListResourcePoliciesResponseResourcePolicy":{ + "type":"structure", + "members":{ + "PolicyArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for policy.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + }, + "PolicyName":{ + "shape":"String", + "documentation":"

        Name of the policy.

        " + } + }, + "documentation":"

        Contains details about a policy for a resource.

        " + }, + "ListSessionsRequest":{ + "type":"structure", + "required":["ApprovalTeamArn"], + "members":{ + "ApprovalTeamArn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the approval team.

        ", + "location":"uri", + "locationName":"ApprovalTeamArn" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of items to return in the response. If more results exist than the specified MaxResults value, a token is included in the response so that you can retrieve the remaining results.

        " + }, + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "Filters":{ + "shape":"Filters", + "documentation":"

        An array of Filter objects. Contains the filter to apply when listing sessions.

        " + } + } + }, + "ListSessionsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

        If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a next call to the operation to get more output. You can repeat this until the NextToken response element returns null.

        " + }, + "Sessions":{ + "shape":"ListSessionsResponseSessions", + "documentation":"

        An array of ListSessionsResponseSession objects. Contains details for the sessions.

        " + } + } + }, + "ListSessionsResponseSession":{ + "type":"structure", + "members":{ + "SessionArn":{ + "shape":"SessionArn", + "documentation":"

        Amazon Resource Name (ARN) for the session.

        " + }, + "ApprovalTeamName":{ + "shape":"ApprovalTeamName", + "documentation":"

        Name of the approval team.

        " + }, + "ApprovalTeamArn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the approval team.

        " + }, + "InitiationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session was initiated.

        " + }, + "ExpirationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session will expire.

        " + }, + "CompletionTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the session was completed.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the team.

        " + }, + "ActionName":{ + "shape":"ActionName", + "documentation":"

        Name of the protected operation.

        " + }, + "ProtectedResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the protected operation.

        " + }, + "RequesterServicePrincipal":{ + "shape":"ServicePrincipal", + "documentation":"

        Service principal for the service associated with the protected operation.

        " + }, + "RequesterPrincipalArn":{ + "shape":"String", + "documentation":"

        IAM principal that made the operation request.

        " + }, + "RequesterRegion":{ + "shape":"Region", + "documentation":"

        Amazon Web Services Region where the operation request originated.

        " + }, + "RequesterAccountId":{ + "shape":"AccountId", + "documentation":"

        ID for the account that made the operation request.

        " + }, + "Status":{ + "shape":"SessionStatus", + "documentation":"

        Status for the protected operation. For example, if the operation is PENDING.

        " + }, + "StatusCode":{ + "shape":"SessionStatusCode", + "documentation":"

        Status code of the session.

        " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

        Message describing the status for session.

        " + }, + "ActionCompletionStrategy":{ + "shape":"ActionCompletionStrategy", + "documentation":"

        Strategy for executing the protected operation. AUTO_COMPLETION_UPON_APPROVAL means the operation is executed automatically using the requester's permissions, if approved.

        " + } + }, + "documentation":"

        Contains details for an approval session. For more information, see Session in the Multi-party approval User Guide.

        " + }, + "ListSessionsResponseSessions":{ + "type":"list", + "member":{"shape":"ListSessionsResponseSession"}, + "max":20, + "min":0 + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource.

        ", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"Tags", + "documentation":"

        Tags attached to the resource.

        " + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "Message":{ + "type":"string", + "max":500, + "min":0 + }, + "MofNApprovalStrategy":{ + "type":"structure", + "required":["MinApprovalsRequired"], + "members":{ + "MinApprovalsRequired":{ + "shape":"MofNApprovalStrategyMinApprovalsRequiredInteger", + "documentation":"

        Minimum number of approvals (M) required for a total number of approvers (N).

        " + } + }, + "documentation":"

        Strategy for how an approval team grants approval.

        " + }, + "MofNApprovalStrategyMinApprovalsRequiredInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "Operator":{ + "type":"string", + "enum":[ + "EQ", + "NE", + "GT", + "LT", + "GTE", + "LTE", + "CONTAINS", + "NOT_CONTAINS", + "BETWEEN" + ] + }, + "ParticipantId":{ + "type":"string", + "max":100, + "min":1 + }, + "PendingUpdate":{ + "type":"structure", + "members":{ + "VersionId":{ + "shape":"String", + "documentation":"

        Version ID for the team.

        " + }, + "Description":{ + "shape":"String", + "documentation":"

        Description for the team.

        " + }, + "ApprovalStrategy":{ + "shape":"ApprovalStrategyResponse", + "documentation":"

        An ApprovalStrategyResponse object. Contains details for how the team grants approval.

        " + }, + "NumberOfApprovers":{ + "shape":"Integer", + "documentation":"

        Total number of approvers in the team.

        " + }, + "Status":{ + "shape":"ApprovalTeamStatus", + "documentation":"

        Status for the team. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusCode":{ + "shape":"ApprovalTeamStatusCode", + "documentation":"

        Status code for the update. For more information, see Team health in the Multi-party approval User Guide.

        " + }, + "StatusMessage":{ + "shape":"Message", + "documentation":"

        Message describing the status for the team.

        " + }, + "Approvers":{ + "shape":"GetApprovalTeamResponseApprovers", + "documentation":"

        An array of GetApprovalTeamResponseApprover objects. Contains details for the approvers in the team.

        " + }, + "UpdateInitiationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the update request was initiated.

        " + } + }, + "documentation":"

        Contains details for the pending updates for an approval team, if applicable.

        " + }, + "Policies":{ + "type":"list", + "member":{"shape":"Policy"}, + "max":20, + "min":0 + }, + "PoliciesReferences":{ + "type":"list", + "member":{"shape":"PolicyReference"}, + "max":10, + "min":1 + }, + "Policy":{ + "type":"structure", + "required":[ + "Arn", + "DefaultVersion", + "PolicyType", + "Name" + ], + "members":{ + "Arn":{ + "shape":"UnqualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        " + }, + "DefaultVersion":{ + "shape":"PolicyVersionId", + "documentation":"

        Determines if the specified policy is the default for the team.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + }, + "Name":{ + "shape":"PolicyName", + "documentation":"

        Name of the policy.

        " + } + }, + "documentation":"

        Contains details for a policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "PolicyDocument":{ + "type":"string", + "max":400000, + "min":0, + "sensitive":true + }, + "PolicyName":{ + "type":"string", + "max":64, + "min":0 + }, + "PolicyReference":{ + "type":"structure", + "required":["PolicyArn"], + "members":{ + "PolicyArn":{ + "shape":"QualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        " + } + }, + "documentation":"

        Contains the Amazon Resource Name (ARN) for a policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "PolicyStatus":{ + "type":"string", + "enum":[ + "ATTACHABLE", + "DEPRECATED" + ] + }, + "PolicyType":{ + "type":"string", + "enum":[ + "AWS_MANAGED", + "AWS_RAM" + ] + }, + "PolicyVersion":{ + "type":"structure", + "required":[ + "Arn", + "PolicyArn", + "VersionId", + "PolicyType", + "IsDefault", + "Name", + "Status", + "CreationTime", + "LastUpdatedTime", + "Document" + ], + "members":{ + "Arn":{ + "shape":"QualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy version.

        " + }, + "PolicyArn":{ + "shape":"UnqualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        " + }, + "VersionId":{ + "shape":"PolicyVersionId", + "documentation":"

        Version ID for the policy.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

        Determines if the specified policy is the default for the team.

        " + }, + "Name":{ + "shape":"PolicyName", + "documentation":"

        Name of the policy.

        " + }, + "Status":{ + "shape":"PolicyStatus", + "documentation":"

        Status for the policy. For example, if the policy is attachable or deprecated.

        " + }, + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the policy was created.

        " + }, + "LastUpdatedTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the policy was last updated.

        " + }, + "Document":{ + "shape":"PolicyDocument", + "documentation":"

        Document that contains the policy contents.

        " + } + }, + "documentation":"

        Contains details for the version of a policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "PolicyVersionId":{ + "type":"integer", + "box":true, + "min":1 + }, + "PolicyVersionSummary":{ + "type":"structure", + "required":[ + "Arn", + "PolicyArn", + "VersionId", + "PolicyType", + "IsDefault", + "Name", + "Status", + "CreationTime", + "LastUpdatedTime" + ], + "members":{ + "Arn":{ + "shape":"QualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy version.

        " + }, + "PolicyArn":{ + "shape":"UnqualifiedPolicyArn", + "documentation":"

        Amazon Resource Name (ARN) for the policy.

        " + }, + "VersionId":{ + "shape":"PolicyVersionId", + "documentation":"

        Version ID for the policy.

        " + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

        The type of policy.

        " + }, + "IsDefault":{ + "shape":"Boolean", + "documentation":"

        Determines if the specified policy is the default for the team.

        " + }, + "Name":{ + "shape":"PolicyName", + "documentation":"

        Name of the policy.

        " + }, + "Status":{ + "shape":"PolicyStatus", + "documentation":"

        Status for the policy. For example, if the policy is attachable or deprecated.

        " + }, + "CreationTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the policy was created.

        " + }, + "LastUpdatedTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the policy was last updated.

        " + } + }, + "documentation":"

        Contains details for the version of a policy. Policies define the permissions for team resources.

        The protected operation for a service integration might require specific permissions. For more information, see How other services work with Multi-party approval in the Multi-party approval User Guide.

        " + }, + "PolicyVersions":{ + "type":"list", + "member":{"shape":"PolicyVersionSummary"}, + "max":20, + "min":0 + }, + "QualifiedPolicyArn":{ + "type":"string", + "max":1224, + "min":0, + "pattern":"arn:.{1,63}:mpa:::aws:policy/[a-zA-Z0-9_\\.-]{1,1023}/[a-zA-Z0-9_\\.-]{1,1023}/(?:[\\d]+|\\$DEFAULT)" + }, + "Region":{ + "type":"string", + "max":100, + "min":0 + }, + "RequesterComment":{ + "type":"string", + "max":200, + "min":0, + "sensitive":true + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the ResourceNotFoundException error.

        " + } + }, + "documentation":"

        The specified resource doesn't exist. Check the resource ID, and try again.

        ", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ServicePrincipal":{ + "type":"string", + "max":100, + "min":1 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the ServiceQuotaExceededException error.

        " + } + }, + "documentation":"

        The request exceeds the service quota for your account. Request a quota increase or reduce your request size.

        ", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SessionArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws(-[^:]+)?:mpa:[a-z0-9-]{1,20}:[0-9]{12}:session/[a-zA-Z0-9._-]+/[a-zA-Z0-9_-]+" + }, + "SessionExecutionStatus":{ + "type":"string", + "enum":[ + "EXECUTED", + "FAILED", + "PENDING" + ] + }, + "SessionKey":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9\\p{P}]*", + "sensitive":true + }, + "SessionMetadata":{ + "type":"map", + "key":{"shape":"SessionKey"}, + "value":{"shape":"SessionValue"}, + "sensitive":true + }, + "SessionResponse":{ + "type":"string", + "enum":[ + "APPROVED", + "REJECTED", + "NO_RESPONSE" + ] + }, + "SessionStatus":{ + "type":"string", + "enum":[ + "PENDING", + "CANCELLED", + "APPROVED", + "FAILED", + "CREATING" + ] + }, + "SessionStatusCode":{ + "type":"string", + "enum":[ + "REJECTED", + "EXPIRED", + "CONFIGURATION_CHANGED" + ] + }, + "SessionValue":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[a-zA-Z0-9\\p{P}]*", + "sensitive":true + }, + "StartActiveApprovalTeamDeletionRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "PendingWindowDays":{ + "shape":"Integer", + "documentation":"

        Number of days between when the team approves the delete request and when the team is deleted.

        " + }, + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        ", + "location":"uri", + "locationName":"Arn" + } + } + }, + "StartActiveApprovalTeamDeletionResponse":{ + "type":"structure", + "members":{ + "DeletionCompletionTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the deletion process is scheduled to complete.

        " + }, + "DeletionStartTime":{ + "shape":"IsoTimestamp", + "documentation":"

        Timestamp when the deletion process was initiated.

        " + } + } + }, + "String":{ + "type":"string", + "max":1000, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0, + "sensitive":true + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource you want to tag.

        ", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"Tags", + "documentation":"

        Tags that you have added to the specified resource.

        " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "sensitive":true + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "sensitive":true + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the ThrottlingException error.

        " + } + }, + "documentation":"

        The request was denied due to request throttling.

        ", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Token":{ + "type":"string", + "max":4096, + "min":0 + }, + "TooManyTagsException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the TooManyTagsException error.

        " + }, + "ResourceName":{ + "shape":"String", + "documentation":"

        Name of the resource for the TooManyTagsException error.

        " + } + }, + "documentation":"

        The request exceeds the maximum number of tags allowed for this resource. Remove some tags, and try again.

        ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UnqualifiedPolicyArn":{ + "type":"string", + "max":1224, + "min":0, + "pattern":"arn:.{1,63}:mpa:::aws:policy/[a-zA-Z0-9_\\.-]{1,1023}/[a-zA-Z0-9_\\.-]{1,1023}" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

        Amazon Resource Name (ARN) for the resource you want to untag.

        ", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

        Array of tag keys for the tags that you want to remove from the specified resource.

        " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateApprovalTeamRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "ApprovalStrategy":{ + "shape":"ApprovalStrategy", + "documentation":"

        An ApprovalStrategy object. Contains details for how the team grants approval.

        " + }, + "Approvers":{ + "shape":"ApprovalTeamRequestApprovers", + "documentation":"

        An array of ApprovalTeamRequestApprover objects. Contains details for the approvers in the team.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Description for the team.

        " + }, + "Arn":{ + "shape":"ApprovalTeamArn", + "documentation":"

        Amazon Resource Name (ARN) for the team.

        ", + "location":"uri", + "locationName":"Arn" + } + } + }, + "UpdateApprovalTeamResponse":{ + "type":"structure", + "members":{ + "VersionId":{ + "shape":"String", + "documentation":"

        Version ID for the team that was created. When an approval team is updated, the version ID changes.

        " + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Message for the ValidationException error.

        " + } + }, + "documentation":"

        The input fails to satisfy the constraints specified by an Amazon Web Services service.

        ", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

        Multi-party approval is a capability of Organizations that allows you to protect a predefined list of operations through a distributed approval process. Use Multi-party approval to establish approval workflows and transform security processes into team-based decisions.

        When to use Multi-party approval:

        • You need to align with the Zero Trust principle of \"never trust, always verify\"

        • You need to make sure that the right humans have access to the right things in the right way

        • You need distributed decision-making for sensitive or critical operations

        • You need to protect against unintended operations on sensitive or critical resources

        • You need formal reviews and approvals for auditing or compliance reasons

        For more information, see What is Multi-party approval in the Multi-party approval User Guide.

        " +} diff --git a/services/mpa/src/main/resources/codegen-resources/waiters-2.json b/services/mpa/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/mpa/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/mq/pom.xml b/services/mq/pom.xml index 608737f5ad52..58572660b2e9 100644 --- a/services/mq/pom.xml +++ b/services/mq/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 mq diff --git a/services/mq/src/main/resources/codegen-resources/customization.config b/services/mq/src/main/resources/codegen-resources/customization.config index 3108bd46a8eb..f2a240391567 100644 --- a/services/mq/src/main/resources/codegen-resources/customization.config +++ b/services/mq/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "createBroker", "createConfiguration" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mturk/pom.xml b/services/mturk/pom.xml index 4b8522506173..d2650d08f832 100644 --- a/services/mturk/pom.xml +++ b/services/mturk/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mturk AWS Java SDK :: Services :: Amazon Mechanical Turk Requester diff --git a/services/mturk/src/main/resources/codegen-resources/customization.config b/services/mturk/src/main/resources/codegen-resources/customization.config index f59e1cbc74bc..ce10323d7ce7 100644 --- a/services/mturk/src/main/resources/codegen-resources/customization.config +++ b/services/mturk/src/main/resources/codegen-resources/customization.config @@ -9,6 +9,5 @@ "listQualificationRequests", "listHITs" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff 
--git a/services/mwaa/pom.xml b/services/mwaa/pom.xml index 84cb2d8af7d6..8c5e2dd55b24 100644 --- a/services/mwaa/pom.xml +++ b/services/mwaa/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT mwaa AWS Java SDK :: Services :: MWAA diff --git a/services/mwaa/src/main/resources/codegen-resources/customization.config b/services/mwaa/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/mwaa/src/main/resources/codegen-resources/customization.config +++ b/services/mwaa/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/mwaa/src/main/resources/codegen-resources/service-2.json b/services/mwaa/src/main/resources/codegen-resources/service-2.json index f3e7465469c2..c96ab1c6500a 100644 --- a/services/mwaa/src/main/resources/codegen-resources/service-2.json +++ b/services/mwaa/src/main/resources/codegen-resources/service-2.json @@ -825,6 +825,10 @@ "Source":{ "shape":"UpdateSource", "documentation":"

        The source of the last update to the environment. Includes internal processes by Amazon MWAA, such as an environment maintenance update.

        " + }, + "WorkerReplacementStrategy":{ + "shape":"WorkerReplacementStrategy", + "documentation":"

        The worker replacement strategy used in the last update of the environment.

        " } }, "documentation":"

        Describes the status of the last update on the environment, and any errors that were encountered.

        " @@ -1375,18 +1379,50 @@ "shape":"IamRoleArn", "documentation":"

        The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role.

        " }, + "AirflowConfigurationOptions":{ + "shape":"AirflowConfigurationOptions", + "documentation":"

        A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

        " + }, "AirflowVersion":{ "shape":"AirflowVersion", "documentation":"

        The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

        Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

        Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

        " }, - "SourceBucketArn":{ - "shape":"S3BucketArn", - "documentation":"

        The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

        " - }, "DagS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs.

        " }, + "EnvironmentClass":{ + "shape":"EnvironmentClass", + "documentation":"

        The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

        " + }, + "LoggingConfiguration":{ + "shape":"LoggingConfigurationInput", + "documentation":"

        The Apache Airflow log types to send to CloudWatch Logs.

        " + }, + "MaxWorkers":{ + "shape":"MaxWorkers", + "documentation":"

        The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

        " + }, + "MinWorkers":{ + "shape":"MinWorkers", + "documentation":"

        The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

        " + }, + "MaxWebservers":{ + "shape":"MaxWebservers", + "documentation":"

        The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + }, + "MinWebservers":{ + "shape":"MinWebservers", + "documentation":"

        The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + }, + "WorkerReplacementStrategy":{ + "shape":"WorkerReplacementStrategy", + "documentation":"

        The worker replacement strategy to use when updating the environment.

        You can select one of the following strategies:

        • Forced - Stops and replaces Apache Airflow workers without waiting for tasks to complete before an update.

        • Graceful - Allows Apache Airflow workers to complete running tasks for up to 12 hours during an update before they're stopped and replaced.

        " + }, + "NetworkConfiguration":{ + "shape":"UpdateNetworkConfigurationInput", + "documentation":"

        The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

        " + }, "PluginsS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the plugins.zip file on your Amazon S3 bucket. For example, plugins.zip. If specified, then the plugins.zip version is required. For more information, see Installing custom plugins.

        " @@ -1403,6 +1439,14 @@ "shape":"S3ObjectVersion", "documentation":"

        The version of the requirements.txt file on your Amazon S3 bucket. You must specify a version each time a requirements.txt file is updated. For more information, see How S3 Versioning works.

        " }, + "Schedulers":{ + "shape":"Schedulers", + "documentation":"

        The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

        " + }, + "SourceBucketArn":{ + "shape":"S3BucketArn", + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon S3 bucket where your DAG code and supporting files are stored. For example, arn:aws:s3:::my-airflow-bucket-unique-name. For more information, see Create an Amazon S3 bucket for Amazon MWAA.

        " + }, "StartupScriptS3Path":{ "shape":"RelativePath", "documentation":"

        The relative path to the startup shell script in your Amazon S3 bucket. For example, s3://mwaa-environment/startup.sh.

        Amazon MWAA runs the script as your environment starts, and before running the Apache Airflow process. You can use this script to install dependencies, modify Apache Airflow configuration options, and set environment variables. For more information, see Using a startup script.

        " @@ -1411,49 +1455,13 @@ "shape":"S3ObjectVersion", "documentation":"

        The version of the startup shell script in your Amazon S3 bucket. You must specify the version ID that Amazon S3 assigns to the file every time you update the script.

        Version IDs are Unicode, UTF-8 encoded, URL-ready, opaque strings that are no more than 1,024 bytes long. The following is an example:

        3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo

        For more information, see Using a startup script.

        " }, - "AirflowConfigurationOptions":{ - "shape":"AirflowConfigurationOptions", - "documentation":"

        A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options.

        " - }, - "EnvironmentClass":{ - "shape":"EnvironmentClass", - "documentation":"

        The environment class type. Valid values: mw1.micro, mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

        " - }, - "MaxWorkers":{ - "shape":"MaxWorkers", - "documentation":"

        The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers.

        " - }, - "NetworkConfiguration":{ - "shape":"UpdateNetworkConfigurationInput", - "documentation":"

        The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. For more information, see About networking on Amazon MWAA.

        " - }, - "LoggingConfiguration":{ - "shape":"LoggingConfigurationInput", - "documentation":"

        The Apache Airflow log types to send to CloudWatch Logs.

        " - }, - "WeeklyMaintenanceWindowStart":{ - "shape":"WeeklyMaintenanceWindowStart", - "documentation":"

        The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

        " - }, "WebserverAccessMode":{ "shape":"WebserverAccessMode", "documentation":"

        The Apache Airflow Web server access mode. For more information, see Apache Airflow access modes.

        " }, - "MinWorkers":{ - "shape":"MinWorkers", - "documentation":"

        The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2.

        " - }, - "Schedulers":{ - "shape":"Schedulers", - "documentation":"

        The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

        " - }, - "MinWebservers":{ - "shape":"MinWebservers", - "documentation":"

        The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " - }, - "MaxWebservers":{ - "shape":"MaxWebservers", - "documentation":"

        The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebserers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers.

        Valid values: For environments larger than mw1.micro, accepts values from 2 to 5. Defaults to 2 for all environment sizes except mw1.micro, which defaults to 1.

        " + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

        The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

        " } } }, @@ -1541,6 +1549,13 @@ "max":9, "min":1, "pattern":".*(MON|TUE|WED|THU|FRI|SAT|SUN):([01]\\d|2[0-3]):(00|30).*" + }, + "WorkerReplacementStrategy":{ + "type":"string", + "enum":[ + "FORCED", + "GRACEFUL" + ] } }, "documentation":"

        Amazon Managed Workflows for Apache Airflow

        This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?.

        Endpoints

        Regions

        For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference.

        " diff --git a/services/neptune/pom.xml b/services/neptune/pom.xml index 63d897da75fe..a1a16f9d60dd 100644 --- a/services/neptune/pom.xml +++ b/services/neptune/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT neptune AWS Java SDK :: Services :: Neptune diff --git a/services/neptune/src/main/resources/codegen-resources/service-2.json b/services/neptune/src/main/resources/codegen-resources/service-2.json index 1c0515ce69c0..86b8fd330bee 100644 --- a/services/neptune/src/main/resources/codegen-resources/service-2.json +++ b/services/neptune/src/main/resources/codegen-resources/service-2.json @@ -1261,6 +1261,25 @@ {"shape":"InvalidDBInstanceStateFault"} ], "documentation":"

        Stops an Amazon Neptune DB cluster. When you stop a DB cluster, Neptune retains the DB cluster's metadata, including its endpoints and DB parameter groups.

        Neptune also retains the transaction logs so you can do a point-in-time restore if necessary.

        " + }, + "SwitchoverGlobalCluster":{ + "name":"SwitchoverGlobalCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SwitchoverGlobalClusterMessage"}, + "output":{ + "shape":"SwitchoverGlobalClusterResult", + "resultWrapper":"SwitchoverGlobalClusterResult" + }, + "errors":[ + {"shape":"GlobalClusterNotFoundFault"}, + {"shape":"InvalidGlobalClusterStateFault"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

        Switches over the specified secondary DB cluster to be the new primary DB cluster in the global database cluster. Switchover operations were previously called \"managed planned failovers.\"

        Promotes the specified secondary cluster to assume full read/write capabilities and demotes the current primary cluster to a secondary (read-only) cluster, maintaining the original replication topology. All secondary clusters are synchronized with the primary at the beginning of the process so the new primary continues operations for the global database without losing any data. Your database is unavailable for a short time while the primary and selected secondary clusters are assuming their new roles.

        This operation is intended for controlled environments, for operations such as \"regional rotation\" or to fall back to the original primary after a global database failover.

        " } }, "shapes":{ @@ -1369,8 +1388,7 @@ }, "AuthorizationNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.

        Neptune may not also be authorized via IAM to perform necessary actions on your behalf.

        ", "error":{ "code":"AuthorizationNotFound", @@ -1408,8 +1426,7 @@ "BooleanOptional":{"type":"boolean"}, "CertificateNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        CertificateIdentifier does not refer to an existing certificate.

        ", "error":{ "code":"CertificateNotFound", @@ -1471,7 +1488,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        The storage type for the DB cluster.

        " + "documentation":"

        The pending change in storage type for the DB cluster.   Valid Values:

        • standard   –   ( the default ) Configures cost-effective database storage for applications with moderate to small I/O usage.

        • iopt1   –   Enables I/O-Optimized storage that's designed to meet the needs of I/O-intensive graph workloads that require predictable pricing with low I/O latency and consistent I/O throughput.

          Neptune I/O-Optimized storage is only available starting with engine release 1.3.0.0.

        " }, "AllocatedStorage":{ "shape":"IntegerOptional", @@ -1479,7 +1496,7 @@ }, "Iops":{ "shape":"IntegerOptional", - "documentation":"

        The Provisioned IOPS (I/O operations per second) value. This setting is only for non-Aurora Multi-AZ DB clusters.

        " + "documentation":"

        The Provisioned IOPS (I/O operations per second) value. This setting is only for Multi-AZ DB clusters.

        " } }, "documentation":"

        This data type is used as a response element in the ModifyDBCluster operation and contains changes that will be applied during the next maintenance window.

        " @@ -1716,7 +1733,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"

        The version number of the database engine to use for the new DB cluster.

        Example: 1.0.2.1

        " + "documentation":"

        The version number of the database engine to use for the new DB cluster.

        Example: 1.2.1.0

        " }, "Port":{ "shape":"IntegerOptional", @@ -1784,7 +1801,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        The storage type to associate with the DB cluster.

        Valid Values:

        • standard | iopt1

        Default:

        • standard

        When you create a Neptune cluster with the storage type set to iopt1, the storage type is returned in the response. The storage type isn't returned when you set it to standard.

        " + "documentation":"

        The storage type for the new DB cluster.

        Valid Values:

        • standard   –   ( the default ) Configures cost-effective database storage for applications with moderate to small I/O usage. When set to standard, the storage type is not returned in the response.

        • iopt1   –   Enables I/O-Optimized storage that's designed to meet the needs of I/O-intensive graph workloads that require predictable pricing with low I/O latency and consistent I/O throughput.

          Neptune I/O-Optimized storage is only available starting with engine release 1.3.0.0.

        " } } }, @@ -1969,7 +1986,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        Specifies the storage type to be associated with the DB instance.

        Not applicable. Storage is managed by the DB Cluster.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "TdeCredentialArn":{ "shape":"String", @@ -2367,7 +2384,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        The storage type associated with the DB cluster.

        " + "documentation":"

        The storage type used by the DB cluster.

        Valid Values:

        • standard   –   ( the default ) Provides cost-effective database storage for applications with moderate to small I/O usage.

        • iopt1   –   Enables I/O-Optimized storage that's designed to meet the needs of I/O-intensive graph workloads that require predictable pricing with low I/O latency and consistent I/O throughput.

          Neptune I/O-Optimized storage is only available starting with engine release 1.3.0.0.

        " } }, "documentation":"

        Contains the details of an Amazon Neptune DB cluster.

        This data type is used as a response element in the DescribeDBClusters.

        ", @@ -2375,8 +2392,7 @@ }, "DBClusterAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        User already has a DB cluster with the given identifier.

        ", "error":{ "code":"DBClusterAlreadyExistsFault", @@ -2433,8 +2449,7 @@ }, "DBClusterEndpointAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified custom endpoint cannot be created because it already exists.

        ", "error":{ "code":"DBClusterEndpointAlreadyExistsFault", @@ -2455,7 +2470,7 @@ "members":{ "Marker":{ "shape":"String", - "documentation":"

        An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " + "documentation":"

        An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " }, "DBClusterEndpoints":{ "shape":"DBClusterEndpointList", @@ -2465,8 +2480,7 @@ }, "DBClusterEndpointNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified custom endpoint doesn't exist.

        ", "error":{ "code":"DBClusterEndpointNotFoundFault", @@ -2477,8 +2491,7 @@ }, "DBClusterEndpointQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The cluster already has the maximum number of custom endpoints.

        ", "error":{ "code":"DBClusterEndpointQuotaExceededFault", @@ -2539,8 +2552,7 @@ }, "DBClusterNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBClusterIdentifier does not refer to an existing DB cluster.

        ", "error":{ "code":"DBClusterNotFoundFault", @@ -2624,8 +2636,7 @@ }, "DBClusterParameterGroupNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBClusterParameterGroupName does not refer to an existing DB Cluster parameter group.

        ", "error":{ "code":"DBClusterParameterGroupNotFound", @@ -2649,8 +2660,7 @@ }, "DBClusterQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.

        ", "error":{ "code":"DBClusterQuotaExceededFault", @@ -2679,8 +2689,7 @@ }, "DBClusterRoleAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified IAM role Amazon Resource Name (ARN) is already associated with the specified DB cluster.

        ", "error":{ "code":"DBClusterRoleAlreadyExists", @@ -2691,8 +2700,7 @@ }, "DBClusterRoleNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified IAM role Amazon Resource Name (ARN) is not associated with the specified DB cluster.

        ", "error":{ "code":"DBClusterRoleNotFound", @@ -2703,8 +2711,7 @@ }, "DBClusterRoleQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You have exceeded the maximum number of IAM roles that can be associated with the specified DB cluster.

        ", "error":{ "code":"DBClusterRoleQuotaExceeded", @@ -2813,8 +2820,7 @@ }, "DBClusterSnapshotAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        User already has a DB cluster snapshot with the given identifier.

        ", "error":{ "code":"DBClusterSnapshotAlreadyExistsFault", @@ -2881,8 +2887,7 @@ }, "DBClusterSnapshotNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBClusterSnapshotIdentifier does not refer to an existing DB cluster snapshot.

        ", "error":{ "code":"DBClusterSnapshotNotFoundFault", @@ -3103,7 +3108,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        Specifies the storage type associated with DB instance.

        " + "documentation":"

        Specifies the storage type associated with the DB instance.

        " }, "TdeCredentialArn":{ "shape":"String", @@ -3191,8 +3196,7 @@ }, "DBInstanceAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        User already has a DB instance with the given identifier.

        ", "error":{ "code":"DBInstanceAlreadyExists", @@ -3223,8 +3227,7 @@ }, "DBInstanceNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBInstanceIdentifier does not refer to an existing DB instance.

        ", "error":{ "code":"DBInstanceNotFound", @@ -3287,8 +3290,7 @@ }, "DBParameterGroupAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A DB parameter group with the same name exists.

        ", "error":{ "code":"DBParameterGroupAlreadyExists", @@ -3328,8 +3330,7 @@ }, "DBParameterGroupNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBParameterGroupName does not refer to an existing DB parameter group.

        ", "error":{ "code":"DBParameterGroupNotFound", @@ -3340,8 +3341,7 @@ }, "DBParameterGroupQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed number of DB parameter groups.

        ", "error":{ "code":"DBParameterGroupQuotaExceeded", @@ -3414,8 +3414,7 @@ }, "DBSecurityGroupNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBSecurityGroupName does not refer to an existing DB security group.

        ", "error":{ "code":"DBSecurityGroupNotFound", @@ -3426,8 +3425,7 @@ }, "DBSnapshotAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBSnapshotIdentifier is already used by an existing snapshot.

        ", "error":{ "code":"DBSnapshotAlreadyExists", @@ -3438,8 +3436,7 @@ }, "DBSnapshotNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBSnapshotIdentifier does not refer to an existing DB snapshot.

        ", "error":{ "code":"DBSnapshotNotFound", @@ -3481,8 +3478,7 @@ }, "DBSubnetGroupAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBSubnetGroupName is already used by an existing DB subnet group.

        ", "error":{ "code":"DBSubnetGroupAlreadyExists", @@ -3493,8 +3489,7 @@ }, "DBSubnetGroupDoesNotCoverEnoughAZs":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.

        ", "error":{ "code":"DBSubnetGroupDoesNotCoverEnoughAZs", @@ -3518,8 +3513,7 @@ }, "DBSubnetGroupNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DBSubnetGroupName does not refer to an existing DB subnet group.

        ", "error":{ "code":"DBSubnetGroupNotFoundFault", @@ -3530,8 +3524,7 @@ }, "DBSubnetGroupQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed number of DB subnet groups.

        ", "error":{ "code":"DBSubnetGroupQuotaExceeded", @@ -3549,8 +3542,7 @@ }, "DBSubnetQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed number of subnets in a DB subnet groups.

        ", "error":{ "code":"DBSubnetQuotaExceededFault", @@ -3561,8 +3553,7 @@ }, "DBUpgradeDependencyFailureFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB upgrade failed because a resource the DB depends on could not be modified.

        ", "error":{ "code":"DBUpgradeDependencyFailure", @@ -3770,11 +3761,11 @@ }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

        The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

        Default: 100

        Constraints: Minimum 20, maximum 100.

        " + "documentation":"

        The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

        Default: 100

        Constraints: Minimum 20, maximum 100.

        " }, "Marker":{ "shape":"String", - "documentation":"

        An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " + "documentation":"

        An optional pagination token provided by a previous DescribeDBClusterEndpoints request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " } } }, @@ -4283,8 +4274,7 @@ }, "DomainNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Domain does not refer to an existing Active Directory Domain.

        ", "error":{ "code":"DomainNotFoundFault", @@ -4477,8 +4467,7 @@ }, "EventSubscriptionQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You have exceeded the number of events you can subscribe to.

        ", "error":{ "code":"EventSubscriptionQuotaExceeded", @@ -4553,6 +4542,14 @@ "TargetDbClusterIdentifier":{ "shape":"String", "documentation":"

        The Amazon Resource Name (ARN) of the secondary Neptune DB cluster that you want to promote to primary for the global database.

        " + }, + "AllowDataLoss":{ + "shape":"BooleanOptional", + "documentation":"

        Specifies whether to allow data loss for this global database cluster operation. Allowing data loss triggers a global failover operation.

        If you don't specify AllowDataLoss, the global database cluster operation defaults to a switchover.

        Constraints: Can't be specified together with the Switchover parameter.

        " + }, + "Switchover":{ + "shape":"BooleanOptional", + "documentation":"

        Specifies whether to switch over this global database cluster.

        Constraints: Can't be specified together with the AllowDataLoss parameter.

        " } } }, @@ -4562,6 +4559,37 @@ "GlobalCluster":{"shape":"GlobalCluster"} } }, + "FailoverState":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"FailoverStatus", + "documentation":"

        The current status of the global cluster. Possible values are as follows:

        • pending – The service received a request to switch over or fail over the global cluster. The global cluster's primary DB cluster and the specified secondary DB cluster are being verified before the operation starts.

        • failing-over – Neptune is promoting the chosen secondary Neptune DB cluster to become the new primary DB cluster to fail over the global cluster.

        • cancelling – The request to switch over or fail over the global cluster was cancelled and the primary Neptune DB cluster and the selected secondary Neptune DB cluster are returning to their previous states.

        • switching-over – This status covers the range of Neptune internal operations that take place during the switchover process, such as demoting the primary Neptune DB cluster, promoting the secondary Neptune DB cluster, and synchronizing replicas.

        " + }, + "FromDbClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the Neptune DB cluster that is currently being demoted, and which is associated with this state.

        " + }, + "ToDbClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the Neptune DB cluster that is currently being promoted, and which is associated with this state.

        " + }, + "IsDataLossAllowed":{ + "shape":"Boolean", + "documentation":"

        Indicates whether the operation is a global switchover or a global failover. If data loss is allowed, then the operation is a global failover. Otherwise, it's a switchover.

        " + } + }, + "documentation":"

        Contains the state of scheduled or in-process operations on a global cluster (Neptune global database). This data type is empty unless a switchover or failover operation is scheduled or is in progress on the Neptune global database.

        ", + "wrapper":true + }, + "FailoverStatus":{ + "type":"string", + "enum":[ + "pending", + "failing-over", + "cancelling" + ] + }, "Filter":{ "type":"structure", "required":[ @@ -4632,6 +4660,10 @@ "GlobalClusterMembers":{ "shape":"GlobalClusterMemberList", "documentation":"

        A list of cluster ARNs and instance ARNs for all the DB clusters that are part of the global database.

        " + }, + "FailoverState":{ + "shape":"FailoverState", + "documentation":"

        A data object containing all properties for the current state of an in-process or pending switchover or failover process for this global cluster (Neptune global database). This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster.

        " } }, "documentation":"

        Contains the details of an Amazon Neptune global database.

        This data type is used as a response element for the CreateGlobalCluster, DescribeGlobalClusters, ModifyGlobalCluster, DeleteGlobalCluster, FailoverGlobalCluster, and RemoveFromGlobalCluster actions.

        ", @@ -4639,8 +4671,7 @@ }, "GlobalClusterAlreadyExistsFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The GlobalClusterIdentifier already exists. Choose a new global database identifier (unique name) to create a new global database cluster.

        ", "error":{ "code":"GlobalClusterAlreadyExistsFault", @@ -4690,8 +4721,7 @@ }, "GlobalClusterNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The GlobalClusterIdentifier doesn't refer to an existing global database cluster.

        ", "error":{ "code":"GlobalClusterNotFoundFault", @@ -4702,8 +4732,7 @@ }, "GlobalClusterQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The number of global database clusters for this account is already at the maximum allowed.

        ", "error":{ "code":"GlobalClusterQuotaExceededFault", @@ -4727,8 +4756,7 @@ }, "InstanceQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed number of DB instances.

        ", "error":{ "code":"InstanceQuotaExceeded", @@ -4739,8 +4767,7 @@ }, "InsufficientDBClusterCapacityFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB cluster does not have enough capacity for the current operation.

        ", "error":{ "code":"InsufficientDBClusterCapacityFault", @@ -4751,8 +4778,7 @@ }, "InsufficientDBInstanceCapacityFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specified DB instance class is not available in the specified Availability Zone.

        ", "error":{ "code":"InsufficientDBInstanceCapacity", @@ -4763,8 +4789,7 @@ }, "InsufficientStorageClusterCapacityFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.

        ", "error":{ "code":"InsufficientStorageClusterCapacity", @@ -4777,8 +4802,7 @@ "IntegerOptional":{"type":"integer"}, "InvalidDBClusterEndpointStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested operation cannot be performed on the endpoint while the endpoint is in this state.

        ", "error":{ "code":"InvalidDBClusterEndpointStateFault", @@ -4789,8 +4813,7 @@ }, "InvalidDBClusterSnapshotStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The supplied value is not a valid DB cluster snapshot state.

        ", "error":{ "code":"InvalidDBClusterSnapshotStateFault", @@ -4801,8 +4824,7 @@ }, "InvalidDBClusterStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB cluster is not in a valid state.

        ", "error":{ "code":"InvalidDBClusterStateFault", @@ -4813,8 +4835,7 @@ }, "InvalidDBInstanceStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified DB instance is not in the available state.

        ", "error":{ "code":"InvalidDBInstanceState", @@ -4825,8 +4846,7 @@ }, "InvalidDBParameterGroupStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.

        ", "error":{ "code":"InvalidDBParameterGroupState", @@ -4837,8 +4857,7 @@ }, "InvalidDBSecurityGroupStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The state of the DB security group does not allow deletion.

        ", "error":{ "code":"InvalidDBSecurityGroupState", @@ -4849,8 +4868,7 @@ }, "InvalidDBSnapshotStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The state of the DB snapshot does not allow deletion.

        ", "error":{ "code":"InvalidDBSnapshotState", @@ -4861,8 +4879,7 @@ }, "InvalidDBSubnetGroupStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB subnet group cannot be deleted because it is in use.

        ", "error":{ "code":"InvalidDBSubnetGroupStateFault", @@ -4873,8 +4890,7 @@ }, "InvalidDBSubnetStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB subnet is not in the available state.

        ", "error":{ "code":"InvalidDBSubnetStateFault", @@ -4885,8 +4901,7 @@ }, "InvalidEventSubscriptionStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The event subscription is in an invalid state.

        ", "error":{ "code":"InvalidEventSubscriptionState", @@ -4897,8 +4912,7 @@ }, "InvalidGlobalClusterStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The global cluster is in an invalid state and can't perform the requested operation.

        ", "error":{ "code":"InvalidGlobalClusterStateFault", @@ -4909,8 +4923,7 @@ }, "InvalidRestoreFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Cannot restore from vpc backup to non-vpc DB instance.

        ", "error":{ "code":"InvalidRestoreFault", @@ -4921,8 +4934,7 @@ }, "InvalidSubnet":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.

        ", "error":{ "code":"InvalidSubnet", @@ -4933,8 +4945,7 @@ }, "InvalidVPCNetworkStateFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        DB subnet group does not cover all Availability Zones after it is created because users' change.

        ", "error":{ "code":"InvalidVPCNetworkStateFault", @@ -4945,8 +4956,7 @@ }, "KMSKeyNotAccessibleFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Error accessing KMS key.

        ", "error":{ "code":"KMSKeyNotAccessibleFault", @@ -5127,7 +5137,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        The storage type to associate with the DB cluster.

        Valid Values:

        • standard | iopt1

        Default:

        • standard

        " + "documentation":"

        The storage type to associate with the DB cluster.

        Valid Values:

        • standard   –   ( the default ) Configures cost-effective database storage for applications with moderate to small I/O usage.

        • iopt1   –   Enables I/O-Optimized storage that's designed to meet the needs of I/O-intensive graph workloads that require predictable pricing with low I/O latency and consistent I/O throughput.

          Neptune I/O-Optimized storage is only available starting with engine release 1.3.0.0.

        " } } }, @@ -5271,7 +5281,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        Not supported.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "TdeCredentialArn":{ "shape":"String", @@ -5477,8 +5487,7 @@ }, "OptionGroupNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The designated option group could not be found.

        ", "error":{ "code":"OptionGroupNotFoundFault", @@ -5528,7 +5537,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        Indicates the storage type for a DB instance.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "SupportsIops":{ "shape":"Boolean", @@ -5767,7 +5776,7 @@ }, "StorageType":{ "shape":"String", - "documentation":"

        Specifies the storage type to be associated with the DB instance.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "CACertificateIdentifier":{ "shape":"String", @@ -5802,8 +5811,7 @@ }, "ProvisionedIopsNotAvailableInAZFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Provisioned IOPS not available in the specified Availability Zone.

        ", "error":{ "code":"ProvisionedIopsNotAvailableInAZFault", @@ -6004,8 +6012,7 @@ }, "ResourceNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified resource ID was not found.

        ", "error":{ "code":"ResourceNotFoundFault", @@ -6206,8 +6213,7 @@ }, "SNSInvalidTopicFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The SNS topic is invalid.

        ", "error":{ "code":"SNSInvalidTopic", @@ -6218,8 +6224,7 @@ }, "SNSNoAuthorizationFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        There is no SNS authorization.

        ", "error":{ "code":"SNSNoAuthorization", @@ -6230,8 +6235,7 @@ }, "SNSTopicArnNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The ARN of the SNS topic could not be found.

        ", "error":{ "code":"SNSTopicArnNotFound", @@ -6274,8 +6278,7 @@ }, "SharedSnapshotQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You have exceeded the maximum number of accounts that you can share a manual DB snapshot with.

        ", "error":{ "code":"SharedSnapshotQuotaExceeded", @@ -6286,8 +6289,7 @@ }, "SnapshotQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed number of DB snapshots.

        ", "error":{ "code":"SnapshotQuotaExceeded", @@ -6305,8 +6307,7 @@ }, "SourceNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The source could not be found.

        ", "error":{ "code":"SourceNotFound", @@ -6360,8 +6361,7 @@ }, "StorageQuotaExceededFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request would result in user exceeding the allowed amount of storage available across all DB instances.

        ", "error":{ "code":"StorageQuotaExceeded", @@ -6372,8 +6372,7 @@ }, "StorageTypeNotSupportedFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        StorageType specified cannot be associated with the DB Instance.

        ", "error":{ "code":"StorageTypeNotSupported", @@ -6407,8 +6406,7 @@ }, "SubnetAlreadyInUse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The DB subnet is already in use in the Availability Zone.

        ", "error":{ "code":"SubnetAlreadyInUse", @@ -6433,8 +6431,7 @@ }, "SubscriptionAlreadyExistFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        This subscription already exists.

        ", "error":{ "code":"SubscriptionAlreadyExist", @@ -6445,8 +6442,7 @@ }, "SubscriptionCategoryNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The designated subscription category could not be found.

        ", "error":{ "code":"SubscriptionCategoryNotFound", @@ -6457,8 +6453,7 @@ }, "SubscriptionNotFoundFault":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The designated subscription could not be found.

        ", "error":{ "code":"SubscriptionNotFound", @@ -6481,6 +6476,29 @@ "locationName":"Timezone" } }, + "SwitchoverGlobalClusterMessage":{ + "type":"structure", + "required":[ + "GlobalClusterIdentifier", + "TargetDbClusterIdentifier" + ], + "members":{ + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

        The identifier of the global database cluster to switch over. This parameter isn't case-sensitive.

        Constraints: Must match the identifier of an existing global database cluster.

        " + }, + "TargetDbClusterIdentifier":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the secondary Neptune DB cluster that you want to promote to primary for the global database.

        " + } + } + }, + "SwitchoverGlobalClusterResult":{ + "type":"structure", + "members":{ + "GlobalCluster":{"shape":"GlobalCluster"} + } + }, "TStamp":{"type":"timestamp"}, "Tag":{ "type":"structure", @@ -6568,22 +6586,22 @@ "members":{ "StorageType":{ "shape":"String", - "documentation":"

        The valid storage types for your DB instance. For example, gp2, io1.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "StorageSize":{ "shape":"RangeList", - "documentation":"

        The valid range of storage in gibibytes. For example, 100 to 16384.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "ProvisionedIops":{ "shape":"RangeList", - "documentation":"

        The valid range of provisioned IOPS. For example, 1000-20000.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "IopsToStorageRatio":{ "shape":"DoubleRangeList", - "documentation":"

        The valid range of Provisioned IOPS to gibibytes of storage multiplier. For example, 3-10, which means that provisioned IOPS can be between 3 and 10 times storage.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " } }, - "documentation":"

        Information about valid modifications that you can make to your DB instance.

        Contains the result of a successful call to the DescribeValidDBInstanceModifications action.

        " + "documentation":"

        Not applicable. In Neptune the storage type is managed at the DB Cluster level.

        " }, "ValidStorageOptionsList":{ "type":"list", diff --git a/services/neptunedata/pom.xml b/services/neptunedata/pom.xml index a261f988dadf..f2689f35520b 100644 --- a/services/neptunedata/pom.xml +++ b/services/neptunedata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT neptunedata AWS Java SDK :: Services :: Neptunedata diff --git a/services/neptunedata/src/main/resources/codegen-resources/customization.config b/services/neptunedata/src/main/resources/codegen-resources/customization.config index 179dde7fa596..e16c687a7da2 100644 --- a/services/neptunedata/src/main/resources/codegen-resources/customization.config +++ b/services/neptunedata/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "customErrorCodeFieldName": "code", - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/neptunegraph/pom.xml b/services/neptunegraph/pom.xml index 9dd063602289..ffe2036be6ee 100644 --- a/services/neptunegraph/pom.xml +++ b/services/neptunegraph/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT neptunegraph AWS Java SDK :: Services :: Neptune Graph diff --git a/services/neptunegraph/src/main/resources/codegen-resources/customization.config b/services/neptunegraph/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/neptunegraph/src/main/resources/codegen-resources/customization.config +++ b/services/neptunegraph/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/networkfirewall/pom.xml b/services/networkfirewall/pom.xml index 50bf7dcf6c6e..b487776a76c9 100644 --- a/services/networkfirewall/pom.xml +++ 
b/services/networkfirewall/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT networkfirewall AWS Java SDK :: Services :: Network Firewall diff --git a/services/networkfirewall/src/main/resources/codegen-resources/customization.config b/services/networkfirewall/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/customization.config +++ b/services/networkfirewall/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json index 0107ef321d91..504835e7bba3 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json +++ b/services/networkfirewall/src/main/resources/codegen-resources/paginators-1.json @@ -53,6 +53,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Tags" + }, + "ListVpcEndpointAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "VpcEndpointAssociations" } } } diff --git a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json index 5e4742fe7ed6..3dbc59057982 100644 --- a/services/networkfirewall/src/main/resources/codegen-resources/service-2.json +++ b/services/networkfirewall/src/main/resources/codegen-resources/service-2.json @@ -16,6 +16,41 @@ "auth":["aws.auth#sigv4"] }, "operations":{ + "AcceptNetworkFirewallTransitGatewayAttachment":{ + "name":"AcceptNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + 
"input":{"shape":"AcceptNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"AcceptNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Accepts a transit gateway attachment request for Network Firewall. When you accept the attachment request, Network Firewall creates the necessary routing components to enable traffic flow between the transit gateway and firewall endpoints.

        You must accept a transit gateway attachment to complete the creation of a transit gateway-attached firewall, unless auto-accept is enabled on the transit gateway. After acceptance, use DescribeFirewall to verify the firewall status.

        To reject an attachment instead of accepting it, use RejectNetworkFirewallTransitGatewayAttachment.

        It can take several minutes for the attachment acceptance to complete and the firewall to become available.

        " + }, + "AssociateAvailabilityZones":{ + "name":"AssociateAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateAvailabilityZonesRequest"}, + "output":{"shape":"AssociateAvailabilityZonesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"}, + {"shape":"InsufficientCapacityException"} + ], + "documentation":"

        Associates the specified Availability Zones with a transit gateway-attached firewall. For each Availability Zone, Network Firewall creates a firewall endpoint to process traffic. You can specify one or more Availability Zones where you want to deploy the firewall.

        After adding Availability Zones, you must update your transit gateway route tables to direct traffic through the new firewall endpoints. Use DescribeFirewall to monitor the status of the new endpoints.

        " + }, "AssociateFirewallPolicy":{ "name":"AssociateFirewallPolicy", "http":{ @@ -122,6 +157,25 @@ ], "documentation":"

        Creates an Network Firewall TLS inspection configuration. Network Firewall uses TLS inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic. After decryption, Network Firewall inspects the traffic according to your firewall policy's stateful rules, and then re-encrypts it before sending it to its destination. You can enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS inspection with your firewall, you must first import or provision certificates using ACM, create a TLS inspection configuration, add that configuration to a new firewall policy, and then associate that policy with your firewall.

        To update the settings for a TLS inspection configuration, use UpdateTLSInspectionConfiguration.

        To manage a TLS inspection configuration's tags, use the standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource, and UntagResource.

        To retrieve information about TLS inspection configurations, use ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration.

        For more information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the Network Firewall Developer Guide.

        " }, + "CreateVpcEndpointAssociation":{ + "name":"CreateVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVpcEndpointAssociationRequest"}, + "output":{"shape":"CreateVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"InsufficientCapacityException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Creates a firewall endpoint for an Network Firewall firewall. This type of firewall endpoint is independent of the firewall endpoints that you specify in the Firewall itself, and you define it in addition to those endpoints after the firewall has been created. You can define a VPC endpoint association using a different VPC than the one you used in the firewall specifications.

        " + }, "DeleteFirewall":{ "name":"DeleteFirewall", "http":{ @@ -158,6 +212,22 @@ ], "documentation":"

        Deletes the specified FirewallPolicy.

        " }, + "DeleteNetworkFirewallTransitGatewayAttachment":{ + "name":"DeleteNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"DeleteNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Deletes a transit gateway attachment from a Network Firewall. Either the firewall owner or the transit gateway owner can delete the attachment.

        After you delete a transit gateway attachment, traffic will no longer flow through the firewall endpoints.

        After you initiate the delete operation, use DescribeFirewall to monitor the deletion status.

        " + }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", "http":{ @@ -210,6 +280,23 @@ ], "documentation":"

        Deletes the specified TLSInspectionConfiguration.

        " }, + "DeleteVpcEndpointAssociation":{ + "name":"DeleteVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVpcEndpointAssociationRequest"}, + "output":{"shape":"DeleteVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Deletes the specified VpcEndpointAssociation.

        You can check whether an endpoint association is in use by reviewing the route tables for the Availability Zones where you have the endpoint subnet mapping. You can retrieve the subnet mapping by calling DescribeVpcEndpointAssociation. You define and update the route tables through Amazon VPC. As needed, update the route tables for the Availability Zone to remove the firewall endpoint for the association. When the route tables no longer use the firewall endpoint, you can remove the endpoint association safely.

        " + }, "DescribeFirewall":{ "name":"DescribeFirewall", "http":{ @@ -226,6 +313,22 @@ ], "documentation":"

        Returns the data objects for the specified firewall.

        " }, + "DescribeFirewallMetadata":{ + "name":"DescribeFirewallMetadata", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFirewallMetadataRequest"}, + "output":{"shape":"DescribeFirewallMetadataResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns the high-level information about a firewall, including the Availability Zones where the Firewall is currently in use.

        " + }, "DescribeFirewallPolicy":{ "name":"DescribeFirewallPolicy", "http":{ @@ -322,6 +425,22 @@ ], "documentation":"

        High-level information about a rule group, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

        " }, + "DescribeRuleGroupSummary":{ + "name":"DescribeRuleGroupSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeRuleGroupSummaryRequest"}, + "output":{"shape":"DescribeRuleGroupSummaryResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

        Returns detailed information for a stateful rule group.

        For active threat defense Amazon Web Services managed rule groups, this operation provides insight into the protections enabled by the rule group, based on Suricata rule metadata fields. Summaries are available for rule groups you manage and for active threat defense Amazon Web Services managed rule groups.

        To modify how threat information appears in summaries, use the SummaryConfiguration parameter in UpdateRuleGroup.

        " + }, "DescribeTLSInspectionConfiguration":{ "name":"DescribeTLSInspectionConfiguration", "http":{ @@ -338,6 +457,40 @@ ], "documentation":"

        Returns the data objects for the specified TLS inspection configuration.

        " }, + "DescribeVpcEndpointAssociation":{ + "name":"DescribeVpcEndpointAssociation", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeVpcEndpointAssociationRequest"}, + "output":{"shape":"DescribeVpcEndpointAssociationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns the data object for the specified VPC endpoint association.

        " + }, + "DisassociateAvailabilityZones":{ + "name":"DisassociateAvailabilityZones", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateAvailabilityZonesRequest"}, + "output":{"shape":"DisassociateAvailabilityZonesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

        Removes the specified Availability Zone associations from a transit gateway-attached firewall. This removes the firewall endpoints from these Availability Zones and stops traffic filtering in those zones. Before removing an Availability Zone, ensure you've updated your transit gateway route tables to redirect traffic appropriately.

        If AvailabilityZoneChangeProtection is enabled, you must first disable it using UpdateAvailabilityZoneChangeProtection.

        To verify the status of your Availability Zone changes, use DescribeFirewall.

        " + }, "DisassociateSubnets":{ "name":"DisassociateSubnets", "http":{ @@ -496,6 +649,21 @@ ], "documentation":"

        Retrieves the tags associated with the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

        You can tag the Amazon Web Services resources that you manage through Network Firewall: firewalls, firewall policies, and rule groups.

        " }, + "ListVpcEndpointAssociations":{ + "name":"ListVpcEndpointAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListVpcEndpointAssociationsRequest"}, + "output":{"shape":"ListVpcEndpointAssociationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"} + ], + "documentation":"

        Retrieves the metadata for the VPC endpoint associations that you have defined. If you specify a firewall, this returns only the endpoint associations for that firewall.

        Depending on your setting for max results and the number of associations, a single call might not return the full list.

        " + }, "PutResourcePolicy":{ "name":"PutResourcePolicy", "http":{ @@ -511,7 +679,23 @@ {"shape":"ThrottlingException"}, {"shape":"InvalidResourcePolicyException"} ], - "documentation":"

        Creates or updates an IAM policy for your rule group or firewall policy. Use this to share rule groups and firewall policies between accounts. This operation works in conjunction with the Amazon Web Services Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

        Use this operation to create or update a resource policy for your rule group or firewall policy. In the policy, you specify the accounts that you want to share the resource with and the operations that you want the accounts to be able to perform.

        When you add an account in the resource policy, you then run the following Resource Access Manager (RAM) operations to access and accept the shared rule group or firewall policy.

        For additional information about resource sharing using RAM, see Resource Access Manager User Guide.

        " + "documentation":"

        Creates or updates an IAM policy for your rule group, firewall policy, or firewall. Use this to share these resources between accounts. This operation works in conjunction with the Amazon Web Services Resource Access Manager (RAM) service to manage resource sharing for Network Firewall.

        For information about using sharing with Network Firewall resources, see Sharing Network Firewall resources in the Network Firewall Developer Guide.

        Use this operation to create or update a resource policy for your Network Firewall rule group, firewall policy, or firewall. In the resource policy, you specify the accounts that you want to share the Network Firewall resource with and the operations that you want the accounts to be able to perform.

        When you add an account in the resource policy, you then run the following Resource Access Manager (RAM) operations to access and accept the shared resource.

        For additional information about resource sharing using RAM, see Resource Access Manager User Guide.

        " + }, + "RejectNetworkFirewallTransitGatewayAttachment":{ + "name":"RejectNetworkFirewallTransitGatewayAttachment", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RejectNetworkFirewallTransitGatewayAttachmentRequest"}, + "output":{"shape":"RejectNetworkFirewallTransitGatewayAttachmentResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Rejects a transit gateway attachment request for Network Firewall. When you reject the attachment request, Network Firewall cancels the creation of routing components between the transit gateway and firewall endpoints.

        Only the firewall owner can reject the attachment. After rejection, no traffic will flow through the firewall endpoints for this attachment.

        Use DescribeFirewall to monitor the rejection status. To accept the attachment instead of rejecting it, use AcceptNetworkFirewallTransitGatewayAttachment.

        Once rejected, you cannot reverse this action. To establish connectivity, you must create a new transit gateway-attached firewall.

        " }, "StartAnalysisReport":{ "name":"StartAnalysisReport", @@ -593,6 +777,24 @@ ], "documentation":"

        Removes the tags with the specified keys from the specified resource. Tags are key:value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to \"customer\" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource.

        You can manage tags for the Amazon Web Services resources that you manage through Network Firewall: firewalls, firewall policies, and rule groups.

        " }, + "UpdateAvailabilityZoneChangeProtection":{ + "name":"UpdateAvailabilityZoneChangeProtection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateAvailabilityZoneChangeProtectionRequest"}, + "output":{"shape":"UpdateAvailabilityZoneChangeProtectionResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServerError"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidTokenException"}, + {"shape":"ResourceOwnerCheckException"} + ], + "documentation":"

        Modifies the AvailabilityZoneChangeProtection setting for a transit gateway-attached firewall. When enabled, this setting prevents accidental changes to the firewall's Availability Zone configuration. This helps protect against disrupting traffic flow in production environments.

        When enabled, you must disable this protection before using AssociateAvailabilityZones or DisassociateAvailabilityZones to modify the firewall's Availability Zone configuration.

        " + }, "UpdateFirewallAnalysisSettings":{ "name":"UpdateFirewallAnalysisSettings", "http":{ @@ -769,6 +971,46 @@ } }, "shapes":{ + "AWSAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "AZSyncState":{ + "type":"structure", + "members":{ + "Attachment":{"shape":"Attachment"} + }, + "documentation":"

        The status of the firewall endpoint defined by a VpcEndpointAssociation.

        " + }, + "AcceptNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to accept. This ID is returned in the response when creating a transit gateway-attached firewall.

        " + } + } + }, + "AcceptNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment that was accepted.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment. Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + } + } + }, "ActionDefinition":{ "type":"structure", "members":{ @@ -900,6 +1142,49 @@ }, "documentation":"

        The results of a COMPLETED analysis report generated with StartAnalysisReport.

        For an example of traffic analysis report results, see the response syntax of GetAnalysisReportResults.

        " }, + "AssociateAvailabilityZonesRequest":{ + "type":"structure", + "required":["AvailabilityZoneMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones where you want to create firewall endpoints. You must specify at least one Availability Zone.

        " + } + } + }, + "AssociateAvailabilityZonesResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The Availability Zones where Network Firewall created firewall endpoints. Each mapping specifies an Availability Zone where the firewall processes traffic.

        " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + } + } + }, "AssociateFirewallPolicyRequest":{ "type":"structure", "required":["FirewallPolicyArn"], @@ -986,6 +1271,11 @@ } } }, + "AssociationSyncState":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"AZSyncState"} + }, "Attachment":{ "type":"structure", "members":{ @@ -999,15 +1289,16 @@ }, "Status":{ "shape":"AttachmentStatus", - "documentation":"

        The current status of the firewall endpoint in the subnet. This value reflects both the instantiation of the endpoint in the VPC subnet and the sync states that are reported in the Config settings. When this value is READY, the endpoint is available and configured properly to handle network traffic. When the endpoint isn't available for traffic, this value will reflect its state, for example CREATING or DELETING.

        " + "documentation":"

        The current status of the firewall endpoint instantiation in the subnet.

        When this value is READY, the endpoint is available to handle network traffic. Otherwise, this value reflects its state, for example CREATING or DELETING.

        " }, "StatusMessage":{ "shape":"StatusMessage", "documentation":"

        If Network Firewall fails to create or delete the firewall endpoint in the subnet, it populates this with the reason for the error or failure and how to resolve it. A FAILED status indicates a non-recoverable state, and an ERROR status indicates an issue that you can fix. Depending on the error, it can take as many as 15 minutes to populate this field. For more information about the causes for failure or errors and solutions available for this field, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " } }, - "documentation":"

        The configuration and status for a single subnet that you've specified for use by the Network Firewall firewall. This is part of the FirewallStatus.

        " + "documentation":"

        The definition and status of the firewall endpoint for a single subnet. In each configured subnet, Network Firewall instantiates a firewall endpoint to handle network traffic.

        This data type is used for any firewall endpoint type:

        • For Firewall.SubnetMappings, this Attachment is part of the FirewallStatus sync states information. You define firewall subnets using CreateFirewall and AssociateSubnets.

        • For VpcEndpointAssociation, this Attachment is part of the VpcEndpointAssociationStatus sync states information. You define these subnets using CreateVpcEndpointAssociation.

        " }, + "AttachmentId":{"type":"string"}, "AttachmentStatus":{ "type":"string", "enum":[ @@ -1020,6 +1311,37 @@ ] }, "AvailabilityZone":{"type":"string"}, + "AvailabilityZoneMapping":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "AvailabilityZone":{ + "shape":"AvailabilityZoneMappingString", + "documentation":"

        The ID of the Availability Zone where the firewall endpoint is located. For example, us-east-2a. The Availability Zone must be in the same Region as the transit gateway.

        " + } + }, + "documentation":"

        Defines the mapping between an Availability Zone and a firewall endpoint for a transit gateway-attached firewall. Each mapping represents where the firewall can process traffic. You use these mappings when calling CreateFirewall, AssociateAvailabilityZones, and DisassociateAvailabilityZones.

        To retrieve the current Availability Zone mappings for a firewall, use DescribeFirewall.

        " + }, + "AvailabilityZoneMappingString":{ + "type":"string", + "max":128, + "min":1, + "pattern":"\\S+" + }, + "AvailabilityZoneMappings":{ + "type":"list", + "member":{"shape":"AvailabilityZoneMapping"} + }, + "AvailabilityZoneMetadata":{ + "type":"structure", + "members":{ + "IPAddressType":{ + "shape":"IPAddressType", + "documentation":"

        The IP address type of the Firewall subnet in the Availability Zone. You can't change the IP address type after you create the subnet.

        " + } + }, + "documentation":"

        High-level information about an Availability Zone where the firewall has an endpoint defined.

        " + }, "AzSubnet":{ "type":"string", "max":128, @@ -1193,6 +1515,18 @@ "EnabledAnalysisTypes":{ "shape":"EnabledAnalysisTypes", "documentation":"

        An optional setting indicating the specific traffic analysis types to enable on the firewall.

        " + }, + "TransitGatewayId":{ + "shape":"TransitGatewayId", + "documentation":"

        Required when creating a transit gateway-attached firewall. The unique identifier of the transit gateway to attach to this firewall. You can provide either a transit gateway from your account or one that has been shared with you through Resource Access Manager.

        After creating the firewall, you cannot change the transit gateway association. To use a different transit gateway, you must create a new firewall.

        For information about creating firewalls, see CreateFirewall. For specific guidance about transit gateway-attached firewalls, see Considerations for transit gateway-attached firewalls in the Network Firewall Developer Guide.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones where you want to create firewall endpoints for a transit gateway-attached firewall. You must specify at least one Availability Zone. Consider enabling the firewall in every Availability Zone where you have workloads to maintain Availability Zone independence.

        You can modify Availability Zones later using AssociateAvailabilityZones or DisassociateAvailabilityZones, but this may briefly disrupt traffic. The AvailabilityZoneChangeProtection setting controls whether you can make these modifications.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        Optional. A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to TRUE, you cannot add or remove Availability Zones without first disabling this protection using UpdateAvailabilityZoneChangeProtection.

        Default value: FALSE

        " } } }, @@ -1205,7 +1539,7 @@ }, "FirewallStatus":{ "shape":"FirewallStatus", - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " } } }, @@ -1260,6 +1594,10 @@ "AnalyzeRuleGroup":{ "shape":"Boolean", "documentation":"

        Indicates whether you want Network Firewall to analyze the stateless rules in the rule group for rule behavior such as asymmetric routing. If set to TRUE, Network Firewall runs the analysis and then creates the rule group for you. To run the stateless rule group analyzer without creating the rule group, set DryRun to TRUE.

        " + }, + "SummaryConfiguration":{ + "shape":"SummaryConfiguration", + "documentation":"

        An object that contains a RuleOptions array of strings. You use RuleOptions to determine which of the following RuleSummary values are returned in response to DescribeRuleGroupSummary.

        • Metadata - returns

        • Msg

        • SID

        " } } }, @@ -1323,6 +1661,46 @@ } } }, + "CreateVpcEndpointAssociationRequest":{ + "type":"structure", + "required":[ + "FirewallArn", + "VpcId", + "SubnetMapping" + ], + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

        The unique identifier of the VPC where you want to create a firewall endpoint.

        " + }, + "SubnetMapping":{"shape":"SubnetMapping"}, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the VPC endpoint association.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        The key:value pairs to associate with the resource.

        " + } + } + }, + "CreateVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, "CustomAction":{ "type":"structure", "required":[ @@ -1345,6 +1723,7 @@ "type":"list", "member":{"shape":"CustomAction"} }, + "DeepThreatInspection":{"type":"boolean"}, "DeleteFirewallPolicyRequest":{ "type":"structure", "members":{ @@ -1388,6 +1767,33 @@ "FirewallStatus":{"shape":"FirewallStatus"} } }, + "DeleteNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to delete.

        " + } + } + }, + "DeleteNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The ID of the transit gateway attachment that was deleted.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment deletion process.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + } + } + }, "DeleteResourcePolicyRequest":{ "type":"structure", "required":["ResourceArn"], @@ -1400,8 +1806,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRuleGroupRequest":{ "type":"structure", @@ -1453,6 +1858,67 @@ } } }, + "DeleteVpcEndpointAssociationRequest":{ + "type":"structure", + "required":["VpcEndpointAssociationArn"], + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + } + }, + "DeleteVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, + "DescribeFirewallMetadataRequest":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + } + } + }, + "DescribeFirewallMetadataResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallPolicyArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall policy.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the firewall.

        " + }, + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you have it configured. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " + }, + "SupportedAvailabilityZones":{ + "shape":"SupportedAvailabilityZones", + "documentation":"

        The Availability Zones that the firewall currently supports. This includes all Availability Zones for which the firewall has a subnet defined.

        " + }, + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " + } + } + }, "DescribeFirewallPolicyRequest":{ "type":"structure", "members":{ @@ -1513,7 +1979,7 @@ }, "FirewallStatus":{ "shape":"FirewallStatus", - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " } } }, @@ -1532,6 +1998,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -1549,6 +2023,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -1595,7 +2077,11 @@ "shape":"ResourceArn", "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " }, - "LoggingConfiguration":{"shape":"LoggingConfiguration"} + "LoggingConfiguration":{"shape":"LoggingConfiguration"}, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that reflects whether or not the firewall monitoring dashboard is enabled on a firewall.

        Returns TRUE when the firewall monitoring dashboard is enabled on the firewall. Returns FALSE when the firewall monitoring dashboard is not enabled on the firewall.

        " + } } }, "DescribeResourcePolicyRequest":{ @@ -1664,7 +2150,7 @@ "StatefulRuleOptions":{"shape":"StatefulRuleOptions"}, "LastModifiedTime":{ "shape":"LastUpdateTime", - "documentation":"

        The last time that the rule group was changed.

        " + "documentation":"

        A timestamp indicating when the rule group was last modified.

        " } } }, @@ -1710,6 +2196,41 @@ } } }, + "DescribeRuleGroupSummaryRequest":{ + "type":"structure", + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the rule group. You can't change the name of a rule group after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "RuleGroupArn":{ + "shape":"ResourceArn", + "documentation":"

        Required. The Amazon Resource Name (ARN) of the rule group.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "Type":{ + "shape":"RuleGroupType", + "documentation":"

        The type of rule group you want a summary for. This is a required field.

        Valid value: STATEFUL

        Note that STATELESS exists but is not currently supported. If you provide STATELESS, an exception is returned.

        " + } + } + }, + "DescribeRuleGroupSummaryResponse":{ + "type":"structure", + "required":["RuleGroupName"], + "members":{ + "RuleGroupName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the rule group. You can't change the name of a rule group after you create it.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the rule group.

        " + }, + "Summary":{ + "shape":"Summary", + "documentation":"

        A complex type that contains rule information based on the rule group's configured summary settings. The content varies depending on the fields that you specified to extract in your SummaryConfiguration. When you haven't configured any summary settings, this returns an empty array. The response might include:

        • Rule identifiers

        • Rule descriptions

        • Any metadata fields that you specified in your SummaryConfiguration

        " + } + } + }, "DescribeTLSInspectionConfigurationRequest":{ "type":"structure", "members":{ @@ -1744,6 +2265,29 @@ } } }, + "DescribeVpcEndpointAssociationRequest":{ + "type":"structure", + "required":["VpcEndpointAssociationArn"], + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + } + }, + "DescribeVpcEndpointAssociationResponse":{ + "type":"structure", + "members":{ + "VpcEndpointAssociation":{ + "shape":"VpcEndpointAssociation", + "documentation":"

        The configuration settings for the VPC endpoint association. These settings include the firewall and the VPC and subnet to use for the firewall endpoint.

        " + }, + "VpcEndpointAssociationStatus":{ + "shape":"VpcEndpointAssociationStatus", + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + } + } + }, "Description":{ "type":"string", "max":512, @@ -1778,6 +2322,49 @@ "max":1, "min":1 }, + "DisassociateAvailabilityZonesRequest":{ + "type":"structure", + "required":["AvailabilityZoneMappings"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        Required. The Availability Zones to remove from the firewall's configuration.

        " + } + } + }, + "DisassociateAvailabilityZonesResponse":{ + "type":"structure", + "members":{ + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The remaining Availability Zones where the firewall has endpoints after the disassociation.

        " + }, + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + } + } + }, "DisassociateSubnetsRequest":{ "type":"structure", "required":["SubnetIds"], @@ -1822,6 +2409,7 @@ } }, "Domain":{"type":"string"}, + "EnableMonitoringDashboard":{"type":"boolean"}, "EnabledAnalysisType":{ "type":"string", "enum":[ @@ -1885,7 +2473,7 @@ }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

        The public subnets that Network Firewall is using for the firewall. Each subnet must belong to a different Availability Zone.

        " + "documentation":"

        The primary public subnets that Network Firewall is using for the firewall. Network Firewall creates a firewall endpoint in each subnet. Create a subnet mapping for each Availability Zone where you want to use the firewall.

        These subnets are all defined for a single, primary VPC, and each must belong to a different Availability Zone. Each of these subnets establishes the availability of the firewall in its Availability Zone.

        In addition to these subnets, you can define other endpoints for the firewall in VpcEndpointAssociation resources. You can define these additional endpoints for any VPC, and for any of the Availability Zones where the firewall resource already has a subnet mapping. VPC endpoint associations give you the ability to protect multiple VPCs using a single firewall, and to define multiple firewall endpoints for a VPC in a single Availability Zone.

        " }, "DeleteProtection":{ "shape":"Boolean", @@ -1915,12 +2503,32 @@ "shape":"EncryptionConfiguration", "documentation":"

        A complex type that contains the Amazon Web Services KMS encryption configuration settings for your firewall.

        " }, + "NumberOfAssociations":{ + "shape":"NumberOfAssociations", + "documentation":"

        The number of VpcEndpointAssociation resources that use this firewall.

        " + }, "EnabledAnalysisTypes":{ "shape":"EnabledAnalysisTypes", "documentation":"

        An optional setting indicating the specific traffic analysis types to enable on the firewall.

        " + }, + "TransitGatewayId":{ + "shape":"TransitGatewayId", + "documentation":"

        The unique identifier of the transit gateway associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " + }, + "TransitGatewayOwnerAccountId":{ + "shape":"AWSAccountId", + "documentation":"

        The Amazon Web Services account ID that owns the transit gateway. This may be different from the firewall owner's account ID when using a shared transit gateway.

        " + }, + "AvailabilityZoneMappings":{ + "shape":"AvailabilityZoneMappings", + "documentation":"

        The Availability Zones where the firewall endpoints are created for a transit gateway-attached firewall. Each mapping specifies an Availability Zone where the firewall processes traffic.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to its Availability Zone configuration. When set to TRUE, you must first disable this protection before adding or removing Availability Zones.

        " } }, - "documentation":"

        The firewall defines the configuration settings for an Network Firewall firewall. These settings include the firewall policy, the subnets in your VPC to use for the firewall endpoints, and any tags that are attached to the firewall Amazon Web Services resource.

        The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both objects by calling DescribeFirewall.

        " + "documentation":"

        A firewall defines the behavior of a firewall, the main VPC where the firewall is used, the Availability Zones where the firewall can be used, and one subnet to use for a firewall endpoint within each of the Availability Zones. The Availability Zones are defined implicitly in the subnet specifications.

        In addition to the firewall endpoints that you define in this Firewall specification, you can create firewall endpoints in VpcEndpointAssociation resources for any VPC, in any Availability Zone where the firewall is already in use.

        The status of the firewall, for example whether it's ready to filter network traffic, is provided in the corresponding FirewallStatus. You can retrieve both the firewall and firewall status by calling DescribeFirewall.

        " }, "FirewallMetadata":{ "type":"structure", @@ -1932,6 +2540,10 @@ "FirewallArn":{ "shape":"ResourceArn", "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment associated with this firewall. This field is only present for transit gateway-attached firewalls.

        " } }, "documentation":"

        High-level information about a firewall, returned by operations like create and describe. You can use the information provided in the metadata to retrieve and manage a firewall.

        " @@ -2064,22 +2676,26 @@ "members":{ "Status":{ "shape":"FirewallStatusValue", - "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you've configured it. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " + "documentation":"

        The readiness of the configured firewall to handle network traffic across all of the Availability Zones where you have it configured. This setting is READY only when the ConfigurationSyncStateSummary value is IN_SYNC and the Attachment Status values for all of the configured subnets are READY.

        " }, "ConfigurationSyncStateSummary":{ "shape":"ConfigurationSyncState", - "documentation":"

        The configuration sync state for the firewall. This summarizes the sync states reported in the Config settings for all of the Availability Zones where you have configured the firewall.

        When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all zones where the firewall is in use. This summary indicates whether the configuration changes have been applied everywhere.

        This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness.

        " + "documentation":"

        The configuration sync state for the firewall. This summarizes the Config settings in the SyncStates for this firewall status object.

        When you create a firewall or update its configuration, for example by adding a rule group to its firewall policy, Network Firewall distributes the configuration changes to all Availability Zones that have subnets defined for the firewall. This summary indicates whether the configuration changes have been applied everywhere.

        This status must be IN_SYNC for the firewall to be ready for use, but it doesn't indicate that the firewall is ready. The Status setting indicates firewall readiness. It's based on this setting and the readiness of the firewall endpoints to take traffic.

        " }, "SyncStates":{ "shape":"SyncStates", - "documentation":"

        The subnets that you've configured for use by the Network Firewall firewall. This contains one array element per Availability Zone where you've configured a subnet. These objects provide details of the information that is summarized in the ConfigurationSyncStateSummary and Status, broken down by zone and configuration object.

        " + "documentation":"

        Status for the subnets that you've configured in the firewall. This contains one array element per Availability Zone where you've configured a subnet in the firewall.

        These objects provide detailed information for the settings ConfigurationSyncStateSummary and Status.

        " }, "CapacityUsageSummary":{ "shape":"CapacityUsageSummary", - "documentation":"

        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calclulates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

        " + "documentation":"

        Describes the capacity usage of the resources contained in a firewall's reference sets. Network Firewall calculates the capacity usage by taking an aggregated count of all of the resources used by all of the reference sets in a firewall.

        " + }, + "TransitGatewayAttachmentSyncState":{ + "shape":"TransitGatewayAttachmentSyncState", + "documentation":"

        The synchronization state of the transit gateway attachment. This indicates whether the firewall's transit gateway configuration is properly synchronized and operational. Use this to verify that your transit gateway configuration changes have been applied.

        " } }, - "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        " + "documentation":"

        Detailed information about the current status of a Firewall. You can retrieve this for a firewall by calling DescribeFirewall and providing the firewall name and ARN.

        The firewall status indicates a combined status. It indicates whether all subnets are up-to-date with the latest firewall configurations, which is based on the sync states config values, and also whether all subnets have their endpoints fully enabled, based on their sync states attachment values.

        " }, "FirewallStatusValue":{ "type":"string", @@ -2613,6 +3229,14 @@ "AvailabilityZone":{ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " } } }, @@ -2627,6 +3251,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        " + }, "FlowOperationId":{ "shape":"FlowOperationId", "documentation":"

        A unique identifier for the flow operation. This ID is returned in the responses to start and list commands. You provide it to describe commands.

        " @@ -2665,6 +3297,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "FlowOperationType":{ "shape":"FlowOperationType", "documentation":"

        An optional string that defines whether any or all operation types are returned.

        " @@ -2787,6 +3427,36 @@ } } }, + "ListVpcEndpointAssociationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

        When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

        " + }, + "MaxResults":{ + "shape":"PaginationMaxResults", + "documentation":"

        The maximum number of objects that you want Network Firewall to return for this request. If more objects are available, in the response, Network Firewall provides a NextToken value that you can use in a subsequent call to get the next batch of objects.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        If you don't specify this, Network Firewall retrieves all VPC endpoint associations that you have defined.

        " + } + } + }, + "ListVpcEndpointAssociationsResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"

        When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Network Firewall returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

        " + }, + "VpcEndpointAssociations":{ + "shape":"VpcEndpointAssociations", + "documentation":"

        The VPC endpoint association metadata objects for the firewall that you specified. If you didn't specify a firewall, this is all VPC endpoint associations that you have defined.

        Depending on your setting for max results and the number of firewalls you have, a single call might not be the full list.

        " + } + } + }, "LogDestinationConfig":{ "type":"structure", "required":[ @@ -3034,18 +3704,17 @@ "members":{ "ResourceArn":{ "shape":"ResourceArn", - "documentation":"

        The Amazon Resource Name (ARN) of the account that you want to share rule groups and firewall policies with.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the account that you want to share your Network Firewall resources with.

        " }, "Policy":{ "shape":"PolicyString", - "documentation":"

        The IAM policy statement that lists the accounts that you want to share your rule group or firewall policy with and the operations that you want the accounts to be able to perform.

        For a rule group resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateFirewallPolicy

        • network-firewall:UpdateFirewallPolicy

        • network-firewall:ListRuleGroups

        For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:AssociateFirewallPolicy

        • network-firewall:ListFirewallPolicies

        In the Resource section of the statement, you specify the ARNs for the rule groups and firewall policies that you want to share with the account that you specified in Arn.

        " + "documentation":"

        The IAM policy statement that lists the accounts that you want to share your Network Firewall resources with and the operations that you want the accounts to be able to perform.

        For a rule group resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateFirewallPolicy

        • network-firewall:UpdateFirewallPolicy

        • network-firewall:ListRuleGroups

        For a firewall policy resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:AssociateFirewallPolicy

        • network-firewall:ListFirewallPolicies

        For a firewall resource, you can specify the following operations in the Actions section of the statement:

        • network-firewall:CreateVpcEndpointAssociation

        • network-firewall:DescribeFirewallMetadata

        • network-firewall:ListFirewalls

        In the Resource section of the statement, you specify the ARNs for the Network Firewall resources that you want to share with the account that you specified in Arn.

        " } } }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ReferenceSets":{ "type":"structure", @@ -3057,6 +3726,33 @@ }, "documentation":"

        Contains a set of IP set references.

        " }, + "RejectNetworkFirewallTransitGatewayAttachmentRequest":{ + "type":"structure", + "required":["TransitGatewayAttachmentId"], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        Required. The unique identifier of the transit gateway attachment to reject. This ID is returned in the response when creating a transit gateway-attached firewall.

        " + } + } + }, + "RejectNetworkFirewallTransitGatewayAttachmentResponse":{ + "type":"structure", + "required":[ + "TransitGatewayAttachmentId", + "TransitGatewayAttachmentStatus" + ], + "members":{ + "TransitGatewayAttachmentId":{ + "shape":"TransitGatewayAttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment that was rejected.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment. Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        For information about troubleshooting endpoint failures, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " + } + } + }, "ReportTime":{"type":"timestamp"}, "ResourceArn":{ "type":"string", @@ -3081,7 +3777,8 @@ "type":"string", "enum":[ "AWS_MANAGED_THREAT_SIGNATURES", - "AWS_MANAGED_DOMAIN_LISTS" + "AWS_MANAGED_DOMAIN_LISTS", + "ACTIVE_THREAT_DEFENSE" ] }, "ResourceName":{ @@ -3236,7 +3933,7 @@ }, "SnsTopic":{ "shape":"ResourceArn", - "documentation":"

        The Amazon resource name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide..

        " + "documentation":"

        The Amazon Resource Name (ARN) of the Amazon Simple Notification Service SNS topic that's used to record changes to the managed rule group. You can subscribe to the SNS topic to receive notifications when the managed rule group is modified, such as for new versions and for version expiration. For more information, see the Amazon Simple Notification Service Developer Guide..

        " }, "LastModifiedTime":{ "shape":"LastUpdateTime", @@ -3245,6 +3942,10 @@ "AnalysisResults":{ "shape":"AnalysisResultList", "documentation":"

        The list of analysis results for AnalyzeRuleGroup. If you set AnalyzeRuleGroup to TRUE in CreateRuleGroup, UpdateRuleGroup, or DescribeRuleGroup, Network Firewall analyzes the rule group and identifies the rules that might adversely affect your firewall's functionality. For example, if Network Firewall detects a rule that's routing traffic asymmetrically, which impacts the service's ability to properly process traffic, the service includes the rule in the list of analysis results.

        " + }, + "SummaryConfiguration":{ + "shape":"SummaryConfiguration", + "documentation":"

        A complex type containing the currently selected rule option fields that will be displayed for rule summarization returned by DescribeRuleGroupSummary.

        " } }, "documentation":"

        The high-level properties of a rule group. This, along with the RuleGroup, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

        " @@ -3290,6 +3991,28 @@ "STRICT_ORDER" ] }, + "RuleSummaries":{ + "type":"list", + "member":{"shape":"RuleSummary"} + }, + "RuleSummary":{ + "type":"structure", + "members":{ + "SID":{ + "shape":"CollectionMember_String", + "documentation":"

        The unique identifier (Signature ID) of the Suricata rule.

        " + }, + "Msg":{ + "shape":"CollectionMember_String", + "documentation":"

        The contents taken from the rule's msg field.

        " + }, + "Metadata":{ + "shape":"CollectionMember_String", + "documentation":"

        The contents of the rule's metadata.

        " + } + }, + "documentation":"

        A complex type containing details about a Suricata rule. Contains:

        • SID

        • Msg

        • Metadata

        Summaries are available for rule groups you manage and for active threat defense Amazon Web Services managed rule groups.

        " + }, "RuleTargets":{ "type":"list", "member":{"shape":"CollectionMember_String"} @@ -3312,7 +4035,7 @@ "documentation":"

        A list of port ranges.

        " } }, - "documentation":"

        Settings that are available for use in the rules in the RuleGroup where this is defined.

        " + "documentation":"

        Settings that are available for use in the rules in the RuleGroup where this is defined. See CreateRuleGroup or UpdateRuleGroup for usage.

        " }, "RulesSource":{ "type":"structure", @@ -3387,7 +4110,7 @@ }, "CertificateAuthorityArn":{ "shape":"ResourceArn", - "documentation":"

        The Amazon Resource Name (ARN) of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection.

        The following limitations apply:

        • You can use CA certificates that you imported into ACM, but you can't generate CA certificates with ACM.

        • You can't use certificates issued by Private Certificate Authority.

        For more information about configuring certificates for outbound inspection, see Using SSL/TLS certificates with certificates with TLS inspection configurations in the Network Firewall Developer Guide.

        For information about working with certificates in ACM, see Importing certificates in the Certificate Manager User Guide.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection.

        The following limitations apply:

        • You can use CA certificates that you imported into ACM, but you can't generate CA certificates with ACM.

        • You can't use certificates issued by Private Certificate Authority.

        For more information about configuring certificates for outbound inspection, see Using SSL/TLS certificates with TLS inspection configurations in the Network Firewall Developer Guide.

        For information about working with certificates in ACM, see Importing certificates in the Certificate Manager User Guide.

        " }, "CheckCertificateRevocationStatus":{ "shape":"CheckCertificateRevocationStatusActions", @@ -3507,6 +4230,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "MinimumFlowAgeInSeconds":{ "shape":"Age", "documentation":"

        The requested FlowOperation ignores flows with an age (in seconds) lower than MinimumFlowAgeInSeconds. You provide this for start commands.

        We recommend setting this value to at least 1 minute (60 seconds) to reduce chance of capturing flows that are not yet established.

        " @@ -3549,6 +4280,14 @@ "shape":"AvailabilityZone", "documentation":"

        The ID of the Availability Zone where the firewall is located. For example, us-east-2a.

        Defines the scope of a flow operation. You can use up to 20 filters to configure a single flow operation.

        " }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "VpcEndpointId":{ + "shape":"VpcEndpointId", + "documentation":"

        A unique identifier for the primary endpoint associated with a firewall.

        " + }, "MinimumFlowAgeInSeconds":{ "shape":"Age", "documentation":"

        The requested FlowOperation ignores flows with an age (in seconds) lower than MinimumFlowAgeInSeconds. You provide this for start commands.

        " @@ -3595,7 +4334,7 @@ "members":{ "RuleOrder":{ "shape":"RuleOrder", - "documentation":"

        Indicates how to manage the order of stateful rule evaluation for the policy. STRICT_ORDER is the default and recommended option. With STRICT_ORDER, provide your rules in the order that you want them to be evaluated. You can then choose one or more default actions for packets that don't match any rules. Choose STRICT_ORDER to have the stateful rules engine determine the evaluation order of your rules. The default action for this rule order is PASS, followed by DROP, REJECT, and ALERT actions. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on your settings. For more information, see Evaluation order for stateful rules in the Network Firewall Developer Guide.

        " + "documentation":"

        Indicates how to manage the order of stateful rule evaluation for the policy. STRICT_ORDER is the recommended option, but DEFAULT_ACTION_ORDER is the default option. With STRICT_ORDER, provide your rules in the order that you want them to be evaluated. You can then choose one or more default actions for packets that don't match any rules. Choose STRICT_ORDER to have the stateful rules engine determine the evaluation order of your rules. The default action for this rule order is PASS, followed by DROP, REJECT, and ALERT actions. Stateful rules are provided to the rule engine as Suricata compatible strings, and Suricata evaluates them based on your settings. For more information, see Evaluation order for stateful rules in the Network Firewall Developer Guide.

        " }, "StreamExceptionPolicy":{ "shape":"StreamExceptionPolicy", @@ -3664,6 +4403,10 @@ "Override":{ "shape":"StatefulRuleGroupOverride", "documentation":"

        The action that allows the policy owner to override the behavior of the rule group within a policy.

        " + }, + "DeepThreatInspection":{ + "shape":"DeepThreatInspection", + "documentation":"

        Network Firewall plans to augment the active threat defense managed rule group with an additional deep threat inspection capability. When this capability is released, Amazon Web Services will analyze service logs of network traffic processed by these rule groups to identify threat indicators across customers. Amazon Web Services will use these threat indicators to improve the active threat defense managed rule groups and protect the security of Amazon Web Services customers and services.

        Customers can opt-out of deep threat inspection at any time through the Network Firewall console or API. When customers opt out, Network Firewall will not use the network traffic processed by those customers' active threat defense rule groups for rule group improvement.

        " } }, "documentation":"

        Identifier for a single stateful rule group, used in a firewall policy to refer to a rule group.

        " @@ -3703,7 +4446,9 @@ "IKEV2", "TFTP", "NTP", - "DHCP" + "DHCP", + "HTTP2", + "QUIC" ] }, "StatefulRules":{ @@ -3802,25 +4547,62 @@ "documentation":"

        The subnet's IP address type. You can't change the IP address type after you create the subnet.

        " } }, - "documentation":"

        The ID for a subnet that you want to associate with the firewall. This is used with CreateFirewall and AssociateSubnets. Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

        " + "documentation":"

        The ID for a subnet that's used in an association with a firewall. This is used in CreateFirewall, AssociateSubnets, and CreateVpcEndpointAssociation. Network Firewall creates an instance of the associated firewall in each subnet that you specify, to filter traffic in the subnet's Availability Zone.

        " }, "SubnetMappings":{ "type":"list", "member":{"shape":"SubnetMapping"} }, + "Summary":{ + "type":"structure", + "members":{ + "RuleSummaries":{ + "shape":"RuleSummaries", + "documentation":"

        An array of RuleSummary objects containing individual rule details that had been configured by the rule group's SummaryConfiguration.

        " + } + }, + "documentation":"

        A complex type containing summaries of security protections provided by a rule group.

        Network Firewall extracts this information from selected fields in the rule group's Suricata rules, based on your SummaryConfiguration settings.

        " + }, + "SummaryConfiguration":{ + "type":"structure", + "members":{ + "RuleOptions":{ + "shape":"SummaryRuleOptions", + "documentation":"

        Specifies the selected rule options returned by DescribeRuleGroupSummary.

        " + } + }, + "documentation":"

        A complex type that specifies which Suricata rule metadata fields to use when displaying threat information. Contains:

        • RuleOptions - The Suricata rule options fields to extract and display

        These settings affect how threat information appears in both the console and API responses. Summaries are available for rule groups you manage and for active threat defense Amazon Web Services managed rule groups.

        " + }, + "SummaryRuleOption":{ + "type":"string", + "enum":[ + "SID", + "MSG", + "METADATA" + ] + }, + "SummaryRuleOptions":{ + "type":"list", + "member":{"shape":"SummaryRuleOption"} + }, + "SupportedAvailabilityZones":{ + "type":"map", + "key":{"shape":"AvailabilityZone"}, + "value":{"shape":"AvailabilityZoneMetadata"} + }, "SyncState":{ "type":"structure", "members":{ "Attachment":{ "shape":"Attachment", - "documentation":"

        The attachment status of the firewall's association with a single VPC subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic. This is part of the FirewallStatus.

        " + "documentation":"

        The configuration and status for a single firewall subnet. For each configured subnet, Network Firewall creates the attachment by instantiating the firewall endpoint in the subnet so that it's ready to take traffic.

        " }, "Config":{ "shape":"SyncStateConfig", - "documentation":"

        The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic. This is part of the FirewallStatus.

        " + "documentation":"

        The configuration status of the firewall endpoint in a single VPC subnet. Network Firewall provides each endpoint with the rules that are configured in the firewall policy. Each time you add a subnet or modify the associated firewall policy, Network Firewall synchronizes the rules in the endpoint, so it can properly filter network traffic.

        " } }, - "documentation":"

        The status of the firewall endpoint and firewall policy configuration for a single VPC subnet.

        For each VPC subnet that you associate with a firewall, Network Firewall does the following:

        • Instantiates a firewall endpoint in the subnet, ready to take traffic.

        • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

        When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

        " + "documentation":"

        The status of the firewall endpoint and firewall policy configuration for a single VPC subnet. This is part of the FirewallStatus.

        For each VPC subnet that you associate with a firewall, Network Firewall does the following:

        • Instantiates a firewall endpoint in the subnet, ready to take traffic.

        • Configures the endpoint with the current firewall policy settings, to provide the filtering behavior for the endpoint.

        When you update a firewall, for example to add a subnet association or change a rule group in the firewall policy, the affected sync states reflect out-of-sync or not ready status until the changes are complete.

        " }, "SyncStateConfig":{ "type":"map", @@ -3999,8 +4781,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4055,6 +4836,51 @@ }, "documentation":"

        Contains metadata about an Certificate Manager certificate.

        " }, + "TransitGatewayAttachmentId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^tgw-attach-[0-9a-z]+$" + }, + "TransitGatewayAttachmentStatus":{ + "type":"string", + "enum":[ + "CREATING", + "DELETING", + "DELETED", + "FAILED", + "ERROR", + "READY", + "PENDING_ACCEPTANCE", + "REJECTING", + "REJECTED" + ] + }, + "TransitGatewayAttachmentSyncState":{ + "type":"structure", + "members":{ + "AttachmentId":{ + "shape":"AttachmentId", + "documentation":"

        The unique identifier of the transit gateway attachment.

        " + }, + "TransitGatewayAttachmentStatus":{ + "shape":"TransitGatewayAttachmentStatus", + "documentation":"

        The current status of the transit gateway attachment.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        " + }, + "StatusMessage":{ + "shape":"TransitGatewayAttachmentSyncStateMessage", + "documentation":"

        A message providing additional information about the current status, particularly useful when the transit gateway attachment is in a non-READY state.

        Valid values are:

        • CREATING - The attachment is being created

        • DELETING - The attachment is being deleted

        • DELETED - The attachment has been deleted

        • FAILED - The attachment creation has failed and cannot be recovered

        • ERROR - The attachment is in an error state that might be recoverable

        • READY - The attachment is active and processing traffic

        • PENDING_ACCEPTANCE - The attachment is waiting to be accepted

        • REJECTING - The attachment is in the process of being rejected

        • REJECTED - The attachment has been rejected

        For information about troubleshooting endpoint failures, see Troubleshooting firewall endpoint failures in the Network Firewall Developer Guide.

        " + } + }, + "documentation":"

        Contains information about the synchronization state of a transit gateway attachment, including its current status and any error messages. Network Firewall uses this to track the state of your transit gateway configuration changes.

        " + }, + "TransitGatewayAttachmentSyncStateMessage":{"type":"string"}, + "TransitGatewayId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^tgw-[0-9a-z]+$" + }, "UniqueSources":{ "type":"structure", "members":{ @@ -4091,8 +4917,50 @@ } }, "UntagResourceResponse":{ + "type":"structure", + "members":{} + }, + "UpdateAvailabilityZoneChangeProtectionRequest":{ + "type":"structure", + "required":["AvailabilityZoneChangeProtection"], + "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        You must specify the ARN or the name, and you can specify both.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

        " + } + } + }, + "UpdateAvailabilityZoneChangeProtectionResponse":{ "type":"structure", "members":{ + "UpdateToken":{ + "shape":"UpdateToken", + "documentation":"

        An optional token that you can use for optimistic locking. Network Firewall returns a token to your requests that access the firewall. The token marks the state of the firewall resource at the time of the request.

        To make an unconditional change to the firewall, omit the token in your update request. Without the token, Network Firewall performs your updates regardless of whether the firewall has changed since you last retrieved it.

        To make a conditional change to the firewall, provide the token in your update request. Network Firewall uses the token to ensure that the firewall hasn't changed since you last retrieved it. If it has changed, the operation fails with an InvalidTokenException. If this happens, retrieve the firewall again to get a current copy of it with a new token. Reapply your changes as needed, then try the operation again using the new token.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "FirewallName":{ + "shape":"ResourceName", + "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " + }, + "AvailabilityZoneChangeProtection":{ + "shape":"Boolean", + "documentation":"

        A setting indicating whether the firewall is protected against changes to the subnet associations. Use this setting to protect against accidentally modifying the subnet associations for a firewall that is in use. When you create a firewall, the operation initializes this setting to TRUE.

        " + } } }, "UpdateFirewallAnalysisSettingsRequest":{ @@ -4369,6 +5237,10 @@ "LoggingConfiguration":{ "shape":"LoggingConfiguration", "documentation":"

        Defines how Network Firewall performs logging for a firewall. If you omit this setting, Network Firewall disables logging for the firewall.

        " + }, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that lets you enable or disable the detailed firewall monitoring dashboard on the firewall.

        The monitoring dashboard provides comprehensive visibility into your firewall's flow logs and alert logs. After you enable detailed monitoring, you can access these dashboards directly from the Monitoring page of the Network Firewall console.

        Specify TRUE to enable the detailed monitoring dashboard on the firewall. Specify FALSE to disable the detailed monitoring dashboard on the firewall.

        " } } }, @@ -4383,7 +5255,11 @@ "shape":"ResourceName", "documentation":"

        The descriptive name of the firewall. You can't change the name of a firewall after you create it.

        " }, - "LoggingConfiguration":{"shape":"LoggingConfiguration"} + "LoggingConfiguration":{"shape":"LoggingConfiguration"}, + "EnableMonitoringDashboard":{ + "shape":"EnableMonitoringDashboard", + "documentation":"

        A boolean that reflects whether or not the firewall monitoring dashboard is enabled on a firewall.

        Returns TRUE when the firewall monitoring dashboard is enabled on the firewall. Returns FALSE when the firewall monitoring dashboard is not enabled on the firewall.

        " + } } }, "UpdateRuleGroupRequest":{ @@ -4433,6 +5309,10 @@ "AnalyzeRuleGroup":{ "shape":"Boolean", "documentation":"

        Indicates whether you want Network Firewall to analyze the stateless rules in the rule group for rule behavior such as asymmetric routing. If set to TRUE, Network Firewall runs the analysis and then updates the rule group for you. To run the stateless rule group analyzer without updating the rule group, set DryRun to TRUE.

        " + }, + "SummaryConfiguration":{ + "shape":"SummaryConfiguration", + "documentation":"

        Updates the selected summary configuration for a rule group.

        Changes affect subsequent responses from DescribeRuleGroupSummary.

        " } } }, @@ -4561,6 +5441,78 @@ "type":"list", "member":{"shape":"VariableDefinition"} }, + "VpcEndpointAssociation":{ + "type":"structure", + "required":[ + "VpcEndpointAssociationArn", + "FirewallArn", + "VpcId", + "SubnetMapping" + ], + "members":{ + "VpcEndpointAssociationId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VPC endpoint association.

        " + }, + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + }, + "FirewallArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the firewall.

        " + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

        The unique identifier of the VPC for the endpoint association.

        " + }, + "SubnetMapping":{"shape":"SubnetMapping"}, + "Description":{ + "shape":"Description", + "documentation":"

        A description of the VPC endpoint association.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        The key:value pairs to associate with the resource.

        " + } + }, + "documentation":"

        A VPC endpoint association defines a single subnet to use for a firewall endpoint for a Firewall. You can define VPC endpoint associations only in the Availability Zones that already have a subnet mapping defined in the Firewall resource.

        You can retrieve the list of Availability Zones that are available for use by calling DescribeFirewallMetadata.

        To manage firewall endpoints, first, in the Firewall specification, you specify a single VPC and one subnet for each of the Availability Zones where you want to use the firewall. Then you can define additional endpoints as VPC endpoint associations.

        You can use VPC endpoint associations to expand the protections of the firewall as follows:

        • Protect multiple VPCs with a single firewall - You can use the firewall to protect other VPCs, either in your account or in accounts where the firewall is shared. You can only specify Availability Zones that already have a firewall endpoint defined in the Firewall subnet mappings.

        • Define multiple firewall endpoints for a VPC in an Availability Zone - You can create additional firewall endpoints for the VPC that you have defined in the firewall, in any Availability Zone that already has an endpoint defined in the Firewall subnet mappings. You can create multiple VPC endpoint associations for any other VPC where you use the firewall.

        You can use Resource Access Manager to share a Firewall that you own with other accounts, which gives them the ability to use the firewall to create VPC endpoint associations. For information about sharing a firewall, see PutResourcePolicy in this guide and see Sharing Network Firewall resources in the Network Firewall Developer Guide.

        The status of the VPC endpoint association, which indicates whether it's ready to filter network traffic, is provided in the corresponding VpcEndpointAssociationStatus. You can retrieve both the association and its status by calling DescribeVpcEndpointAssociation.

        " + }, + "VpcEndpointAssociationMetadata":{ + "type":"structure", + "members":{ + "VpcEndpointAssociationArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of a VPC endpoint association.

        " + } + }, + "documentation":"

        High-level information about a VPC endpoint association, returned by ListVpcEndpointAssociations. You can use the information provided in the metadata to retrieve and manage a VPC endpoint association.

        " + }, + "VpcEndpointAssociationStatus":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"FirewallStatusValue", + "documentation":"

        The readiness of the configured firewall endpoint to handle network traffic.

        " + }, + "AssociationSyncState":{ + "shape":"AssociationSyncState", + "documentation":"

        The list of the Availability Zone sync states for all subnets that are defined by the firewall.

        " + } + }, + "documentation":"

        Detailed information about the current status of a VpcEndpointAssociation. You can retrieve this by calling DescribeVpcEndpointAssociation and providing the VPC endpoint association ARN.

        " + }, + "VpcEndpointAssociations":{ + "type":"list", + "member":{"shape":"VpcEndpointAssociationMetadata"} + }, + "VpcEndpointId":{ + "type":"string", + "max":256, + "min":5, + "pattern":"^vpce-[a-zA-Z0-9]*$" + }, "VpcId":{ "type":"string", "max":128, @@ -4572,5 +5524,5 @@ "member":{"shape":"VpcId"} } }, - "documentation":"

        This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

        The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

        To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference.

        To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall.<region>.amazonaws.com

        To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall.<region>.aws.api

        Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

        For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

        Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 7.0.3. For information about Suricata, see the Suricata website and the Suricata User Guide.

        You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

        • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.

        • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

        • Perform deep packet inspection on traffic entering or leaving your VPC.

        • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

        To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

        To start using Network Firewall, do the following:

        1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

        2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

        3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

        4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

        5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

        6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

        " + "documentation":"

        This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

        The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs.

        To view the complete list of Amazon Web Services Regions where Network Firewall is available, see Service endpoints and quotas in the Amazon Web Services General Reference.

        To access Network Firewall using the IPv4 REST API endpoint: https://network-firewall.<region>.amazonaws.com

        To access Network Firewall using the Dualstack (IPv4 and IPv6) REST API endpoint: https://network-firewall.<region>.aws.api

        Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs.

        For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

        Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 7.0.3. For information about Suricata, see the Suricata website and the Suricata User Guide.

        You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

        • Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic.

        • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

        • Perform deep packet inspection on traffic entering or leaving your VPC.

        • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

        To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

        To start using Network Firewall, do the following:

        1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

        2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

        3. In Network Firewall, define the firewall behavior as follows:

          1. Create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

          2. Create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

        4. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

        5. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

        After your firewall is established, you can add firewall endpoints for new Availability Zones by following the prior steps for the Amazon VPC setup and firewall subnet definitions. You can also add endpoints to Availability Zones that you're using in the firewall, either for the same VPC or for another VPC, by following the prior steps for the Amazon VPC setup, and defining the new VPC subnets as VPC endpoint associations.

        " } diff --git a/services/networkflowmonitor/pom.xml b/services/networkflowmonitor/pom.xml index c3468c3b1c7d..7bca92831f0f 100644 --- a/services/networkflowmonitor/pom.xml +++ b/services/networkflowmonitor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT networkflowmonitor AWS Java SDK :: Services :: Network Flow Monitor diff --git a/services/networkflowmonitor/src/main/resources/codegen-resources/customization.config b/services/networkflowmonitor/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/networkflowmonitor/src/main/resources/codegen-resources/customization.config +++ b/services/networkflowmonitor/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/networkflowmonitor/src/main/resources/codegen-resources/service-2.json b/services/networkflowmonitor/src/main/resources/codegen-resources/service-2.json index f0701de80bc3..af7edea6061b 100644 --- a/services/networkflowmonitor/src/main/resources/codegen-resources/service-2.json +++ b/services/networkflowmonitor/src/main/resources/codegen-resources/service-2.json @@ -84,6 +84,7 @@ "errors":[ {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, @@ -127,7 +128,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryMonitorTopContributors. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryMonitorTopContributors. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "GetQueryResultsWorkloadInsightsTopContributors":{ "name":"GetQueryResultsWorkloadInsightsTopContributors", @@ -146,7 +147,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name.

        This query returns the top contributors for a scope for workload insights. Workload insights provide a high level view of network flow performance data collected by agents. To return the data for the top contributors, see GetQueryResultsWorkloadInsightsTopContributorsData.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributors. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. You specify the query that you want to return results for by providing a query ID and a monitor name.

        This query returns the top contributors for a scope for workload insights. Workload insights provide a high level view of network flow performance data collected by agents. To return the data for the top contributors, see GetQueryResultsWorkloadInsightsTopContributorsData.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributors. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "GetQueryResultsWorkloadInsightsTopContributorsData":{ "name":"GetQueryResultsWorkloadInsightsTopContributorsData", @@ -165,7 +166,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a scope ID.

        This query returns the data for top contributors for workload insights for a specific scope. Workload insights provide a high level view of network flow performance data collected by agents for a scope. To return just the top contributors, see GetQueryResultsWorkloadInsightsTopContributors.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributorsData. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        The top contributor network flows overall for a specific metric type, for example, the number of retransmissions.

        " + "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a scope ID.

        This query returns the data for top contributors for workload insights for a specific scope. Workload insights provide a high level view of network flow performance data collected by agents for a scope. To return just the top contributors, see GetQueryResultsWorkloadInsightsTopContributors.

        Create a query ID for this call by calling the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributorsData. Use the scope ID that was returned for your account by CreateScope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        The top contributor network flows overall are for a specific metric type, for example, the number of retransmissions.

        " }, "GetQueryStatusMonitorTopContributors":{ "name":"GetQueryStatusMonitorTopContributors", @@ -183,7 +184,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors for a monitor.

        When you start a query, use this call to check the status of the query to make sure that it has has SUCCEEDED before you reviewStartQueryWorkloadInsightsTopContributorsData the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryMonitorTopContributors.

        When you run a query, use this call to check the status of the query to make sure that the query has SUCCEEDED before you review the results.

        " + "documentation":"

        Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors for a monitor.

        When you create a query, use this call to check the status of the query to make sure that it has SUCCEEDED before you review the results. Use the same query ID that you used for the corresponding API call to start (create) the query, StartQueryMonitorTopContributors.

        When you run a query, use this call to check the status of the query to make sure that the query has SUCCEEDED before you review the results.

        " }, "GetQueryStatusWorkloadInsightsTopContributors":{ "name":"GetQueryStatusWorkloadInsightsTopContributors", @@ -201,7 +202,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for workload insights.

        When you start a query, use this call to check the status of the query to make sure that it has has SUCCEEDED before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributors.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for workload insights.

        When you start a query, use this call to check the status of the query to make sure that it has SUCCEEDED before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributors.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "GetQueryStatusWorkloadInsightsTopContributorsData":{ "name":"GetQueryStatusWorkloadInsightsTopContributorsData", @@ -219,7 +220,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors data for workload insights.

        When you start a query, use this call to check the status of the query to make sure that it has has SUCCEEDED before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributorsData.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        The top contributor network flows overall for a specific metric type, for example, the number of retransmissions.

        " + "documentation":"

        Returns the current status of a query for the Network Flow Monitor query interface, for a specified query ID and monitor. This call returns the query status for the top contributors data for workload insights.

        When you start a query, use this call to check the status of the query to make sure that it has SUCCEEDED before you review the results. Use the same query ID that you used for the corresponding API call to start the query, StartQueryWorkloadInsightsTopContributorsData.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        The top contributor network flows overall are for a specific metric type, for example, the number of retransmissions.

        " }, "GetScope":{ "name":"GetScope", @@ -310,7 +311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Start a query to return the data with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Create a query that you can use with the Network Flow Monitor query interface to return the top contributors for a monitor. Specify the monitor that you want to create the query for.

        The call returns a query ID that you can use with GetQueryResultsMonitorTopContributors to run the query and return the top contributors for a specific monitor.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable APIs for the top contributors that you want to be returned.

        " }, "StartQueryWorkloadInsightsTopContributors":{ "name":"StartQueryWorkloadInsightsTopContributors", @@ -328,7 +329,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Start a query to return the data with the Network Flow Monitor query interface. Specify the query that you want to start by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Create a query with the Network Flow Monitor query interface that you can run to return workload insights top contributors. Specify the scope that you want to create a query for.

        The call returns a query ID that you can use with GetQueryResultsWorkloadInsightsTopContributors to run the query and return the top contributors for the workload insights for a scope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable APIs for the top contributors that you want to be returned.

        " }, "StartQueryWorkloadInsightsTopContributorsData":{ "name":"StartQueryWorkloadInsightsTopContributorsData", @@ -346,7 +347,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Start a query to return the with the Network Flow Monitor query interface. Specify the query that you want to start by providing a query ID and a monitor name. This query returns the data for top contributors for workload insights.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Create a query with the Network Flow Monitor query interface that you can run to return data for workload insights top contributors. Specify the scope that you want to create a query for.

        The call returns a query ID that you can use with GetQueryResultsWorkloadInsightsTopContributorsData to run the query and return the data for the top contributors for the workload insights for a scope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "StopQueryMonitorTopContributors":{ "name":"StopQueryMonitorTopContributors", @@ -364,7 +365,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Stop a query with the Network Flow Monitor query interface. Specify the query that you want to stop by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        ", + "documentation":"

        Stop a top contributors query for a monitor. Specify the query that you want to stop by providing a query ID and a monitor name.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        ", "idempotent":true }, "StopQueryWorkloadInsightsTopContributors":{ @@ -383,7 +384,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Stop a query with the Network Flow Monitor query interface. Specify the query that you want to stop by providing a query ID and a monitor name. This query returns the top contributors for a specific monitor.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        " + "documentation":"

        Stop a top contributors query for workload insights. Specify the query that you want to stop by providing a query ID and a scope ID.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "StopQueryWorkloadInsightsTopContributorsData":{ "name":"StopQueryWorkloadInsightsTopContributorsData", @@ -401,7 +402,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Return the data for a query with the Network Flow Monitor query interface. Specify the query that you want to return results for by providing a query ID and a scope ID. This query returns data for the top contributors for workload insights. Workload insights provide a high level view of network flow performance data collected by agents for a scope.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type, related to a scope (for workload insights) or a monitor.

        The top contributor network flows overall for a specific metric type, for example, the number of retransmissions.

        " + "documentation":"

        Stop a top contributors data query for workload insights. Specify the query that you want to stop by providing a query ID and a scope ID.

        Top contributors in Network Flow Monitor are network flows with the highest values for a specific metric type. Top contributors can be across all workload insights, for a given scope, or for a specific monitor. Use the applicable call for the top contributors that you want to be returned.

        " }, "TagResource":{ "name":"TagResource", @@ -474,6 +475,7 @@ "errors":[ {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, @@ -783,7 +785,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" }, @@ -833,7 +835,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" }, @@ -887,7 +889,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" }, @@ -1571,7 +1573,7 @@ }, "metricName":{ "shape":"MonitorMetric", - "documentation":"

        The metric that you want to query top contributors for. That is, you can specify this metric to return the top contributor network flows, for this type of metric, for a monitor and (optionally) within a specific category, such as network flows between Availability Zones.

        " + "documentation":"

        The metric that you want to query top contributors for. That is, you can specify a metric with this call and return the top contributor network flows, for that type of metric, for a monitor and (optionally) within a specific category, such as network flows between Availability Zones.

        " }, "destinationCategory":{ "shape":"DestinationCategory", @@ -1700,7 +1702,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" } @@ -1726,7 +1728,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" } @@ -1752,7 +1754,7 @@ }, "queryId":{ "shape":"String", - "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to start a query.

        ", + "documentation":"

        The identifier for the query. A query ID is an internally-generated identifier for a specific query returned from an API call to create a query.

        ", "location":"uri", "locationName":"queryId" } diff --git a/services/networkmanager/pom.xml b/services/networkmanager/pom.xml index 78d8736abe66..df23afc3124d 100644 --- a/services/networkmanager/pom.xml +++ b/services/networkmanager/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT networkmanager AWS Java SDK :: Services :: NetworkManager diff --git a/services/networkmanager/src/main/resources/codegen-resources/customization.config b/services/networkmanager/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/networkmanager/src/main/resources/codegen-resources/customization.config +++ b/services/networkmanager/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/networkmanager/src/main/resources/codegen-resources/service-2.json b/services/networkmanager/src/main/resources/codegen-resources/service-2.json index e64ee0b937d9..a578b2c681c8 100644 --- a/services/networkmanager/src/main/resources/codegen-resources/service-2.json +++ b/services/networkmanager/src/main/resources/codegen-resources/service-2.json @@ -2611,6 +2611,18 @@ "ServiceInsertionActions":{ "shape":"ServiceInsertionActionList", "documentation":"

        Describes the service insertion action.

        " + }, + "VpnEcmpSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether Equal Cost Multipath (ECMP) is enabled for the core network.

        " + }, + "DnsSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether public DNS support is supported. The default is true.

        " + }, + "SecurityGroupReferencingSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether security group referencing is enabled for the core network.

        " } }, "documentation":"

        Describes a core network change.

        " @@ -3716,8 +3728,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSiteRequest":{ "type":"structure", @@ -4102,8 +4113,7 @@ }, "ExecuteCoreNetworkChangeSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ExternalRegionCode":{ "type":"string", @@ -6256,8 +6266,7 @@ }, "PutResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ReasonContextKey":{ "type":"string", @@ -6903,8 +6912,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7120,8 +7128,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateConnectionRequest":{ "type":"structure", @@ -7552,6 +7559,14 @@ "ApplianceModeSupport":{ "shape":"Boolean", "documentation":"

        Indicates whether appliance mode is supported. If enabled, traffic flow between a source and destination use the same Availability Zone for the VPC attachment for the lifetime of that flow. The default value is false.

        " + }, + "DnsSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether DNS is supported.

        " + }, + "SecurityGroupReferencingSupport":{ + "shape":"Boolean", + "documentation":"

        Indicates whether security group referencing is enabled for this VPC attachment. The default is true. However, at the core network policy-level the default is set to false.

        " } }, "documentation":"

        Describes the VPC options.

        " diff --git a/services/networkmonitor/pom.xml b/services/networkmonitor/pom.xml index de38b68c2fd6..0075bc3a6625 100644 --- a/services/networkmonitor/pom.xml +++ b/services/networkmonitor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT networkmonitor AWS Java SDK :: Services :: Network Monitor diff --git a/services/networkmonitor/src/main/resources/codegen-resources/customization.config b/services/networkmonitor/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/networkmonitor/src/main/resources/codegen-resources/customization.config +++ b/services/networkmonitor/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/notifications/pom.xml b/services/notifications/pom.xml index 24a7b6a016d2..18f6bec9ea16 100644 --- a/services/notifications/pom.xml +++ b/services/notifications/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT notifications AWS Java SDK :: Services :: Notifications diff --git a/services/notifications/src/main/resources/codegen-resources/customization.config b/services/notifications/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/notifications/src/main/resources/codegen-resources/customization.config +++ b/services/notifications/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/notificationscontacts/pom.xml b/services/notificationscontacts/pom.xml index 9539a8d5b293..4b976b9a100c 100644 --- a/services/notificationscontacts/pom.xml +++ b/services/notificationscontacts/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 
notificationscontacts AWS Java SDK :: Services :: Notifications Contacts diff --git a/services/notificationscontacts/src/main/resources/codegen-resources/customization.config b/services/notificationscontacts/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/notificationscontacts/src/main/resources/codegen-resources/customization.config +++ b/services/notificationscontacts/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/oam/pom.xml b/services/oam/pom.xml index b4287c792246..afaed72ee87c 100644 --- a/services/oam/pom.xml +++ b/services/oam/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT oam AWS Java SDK :: Services :: OAM diff --git a/services/oam/src/main/resources/codegen-resources/customization.config b/services/oam/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/oam/src/main/resources/codegen-resources/customization.config +++ b/services/oam/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/oam/src/main/resources/codegen-resources/service-2.json b/services/oam/src/main/resources/codegen-resources/service-2.json index 53ab64a3975c..b2eeda43b549 100644 --- a/services/oam/src/main/resources/codegen-resources/service-2.json +++ b/services/oam/src/main/resources/codegen-resources/service-2.json @@ -215,7 +215,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

        Creates or updates the resource policy that grants permissions to source accounts to link to the monitoring account sink. When you create a sink policy, you can grant permissions to all accounts in an organization or to individual accounts.

        You can also use a sink policy to limit the types of data that is shared. The three types that you can allow or deny are:

        • Metrics - Specify with AWS::CloudWatch::Metric

        • Log groups - Specify with AWS::Logs::LogGroup

        • Traces - Specify with AWS::XRay::Trace

        • Application Insights - Applications - Specify with AWS::ApplicationInsights::Application

        See the examples in this section to see how to specify permitted source accounts and data types.

        " + "documentation":"

        Creates or updates the resource policy that grants permissions to source accounts to link to the monitoring account sink. When you create a sink policy, you can grant permissions to all accounts in an organization or to individual accounts.

        You can also use a sink policy to limit the types of data that is shared. The six types of services with their respective resource types that you can allow or deny are:

        • Metrics - Specify with AWS::CloudWatch::Metric

        • Log groups - Specify with AWS::Logs::LogGroup

        • Traces - Specify with AWS::XRay::Trace

        • Application Insights - Applications - Specify with AWS::ApplicationInsights::Application

        • Internet Monitor - Specify with AWS::InternetMonitor::Monitor

        • Application Signals - Specify with AWS::ApplicationSignals::Service and AWS::ApplicationSignals::ServiceLevelObjective

        See the examples in this section to see how to specify permitted source accounts and data types.

        " }, "TagResource":{ "name":"TagResource", @@ -299,7 +299,7 @@ "members":{ "LabelTemplate":{ "shape":"LabelTemplate", - "documentation":"

        Specify a friendly human-readable name to use to identify this source account when you are viewing data from it in the monitoring account.

        You can use a custom label or use the following variables:

        • $AccountName is the name of the account

        • $AccountEmail is the globally unique email address of the account

        • $AccountEmailNoDomain is the email address of the account without the domain name

        " + "documentation":"

        Specify a friendly human-readable name to use to identify this source account when you are viewing data from it in the monitoring account.

        You can use a custom label or use the following variables:

        • $AccountName is the name of the account

        • $AccountEmail is the globally unique email address of the account

        • $AccountEmailNoDomain is the email address of the account without the domain name

        In the Amazon Web Services GovCloud (US-East) and Amazon Web Services GovCloud (US-West) Regions, the only supported option is to use custom labels, and the $AccountName, $AccountEmail, and $AccountEmailNoDomain variables all resolve as account-id instead of the specified variable.

        " }, "LinkConfiguration":{ "shape":"LinkConfiguration", @@ -428,6 +428,10 @@ "Identifier":{ "shape":"ResourceIdentifier", "documentation":"

        The ARN of the link to retrieve information for.

        " + }, + "IncludeTags":{ + "shape":"IncludeTags", + "documentation":"

        Specifies whether to include the tags associated with the link in the response. When IncludeTags is set to true and the caller has the required permission, oam:ListTagsForResource, the API will return the tags for the specified resource. If the caller doesn't have the required permission, oam:ListTagsForResource, the API will raise an exception.

        The default value is false.

        " } } }, @@ -475,6 +479,10 @@ "Identifier":{ "shape":"ResourceIdentifier", "documentation":"

        The ARN of the sink to retrieve information for.

        " + }, + "IncludeTags":{ + "shape":"IncludeTags", + "documentation":"

        Specifies whether to include the tags associated with the sink in the response. When IncludeTags is set to true and the caller has the required permission, oam:ListTagsForResource, the API will return the tags for the specified resource. If the caller doesn't have the required permission, oam:ListTagsForResource, the API will raise an exception.

        The default value is false.

        " } } }, @@ -526,6 +534,10 @@ } } }, + "IncludeTags":{ + "type":"boolean", + "box":true + }, "InternalServiceFault":{ "type":"structure", "members":{ @@ -1030,6 +1042,10 @@ "shape":"ResourceIdentifier", "documentation":"

        The ARN of the link that you want to update.

        " }, + "IncludeTags":{ + "shape":"IncludeTags", + "documentation":"

        Specifies whether to include the tags associated with the link in the response after the update operation. When IncludeTags is set to true and the caller has the required permission, oam:ListTagsForResource, the API will return the tags for the specified resource. If the caller doesn't have the required permission, oam:ListTagsForResource, the API will raise an exception.

        The default value is false.

        " + }, "LinkConfiguration":{ "shape":"LinkConfiguration", "documentation":"

        Use this structure to filter which metric namespaces and which log groups are to be shared from the source account to the monitoring account.

        " @@ -1090,5 +1106,5 @@ "exception":true } }, - "documentation":"

        Use Amazon CloudWatch Observability Access Manager to create and manage links between source accounts and monitoring accounts by using CloudWatch cross-account observability. With CloudWatch cross-account observability, you can monitor and troubleshoot applications that span multiple accounts within a Region. Seamlessly search, visualize, and analyze your metrics, logs, traces, Application Signals services, service level objectives (SLOs), Application Insights applications, and internet monitors in any of the linked accounts without account boundaries.

        Set up one or more Amazon Web Services accounts as monitoring accounts and link them with multiple source accounts. A monitoring account is a central Amazon Web Services account that can view and interact with observability data generated from source accounts. A source account is an individual Amazon Web Services account that generates observability data for the resources that reside in it. Source accounts share their observability data with the monitoring account. The shared observability data can include metrics in Amazon CloudWatch, logs in Amazon CloudWatch Logs, traces in X-Ray, Application Signals services, service level objectives (SLOs), applications in Amazon CloudWatch Application Insights, and internet monitors in CloudWatch Internet Monitor.

        When you set up a link, you can choose to share the metrics from all namespaces with the monitoring account, or filter to a subset of namespaces. And for CloudWatch Logs, you can choose to share all log groups with the monitoring account, or filter to a subset of log groups.

        " + "documentation":"

        Use Amazon CloudWatch Observability Access Manager to create and manage links between source accounts and monitoring accounts by using CloudWatch cross-account observability. With CloudWatch cross-account observability, you can monitor and troubleshoot applications that span multiple accounts within a Region. Seamlessly search, visualize, and analyze your metrics, logs, traces, Application Signals services and service level objectives (SLOs), Application Insights applications, and internet monitors in any of the linked accounts without account boundaries.

        Set up one or more Amazon Web Services accounts as monitoring accounts and link them with multiple source accounts. A monitoring account is a central Amazon Web Services account that can view and interact with observability data generated from source accounts. A source account is an individual Amazon Web Services account that generates observability data for the resources that reside in it. Source accounts share their observability data with the monitoring account. The shared observability data can include metrics in Amazon CloudWatch, logs in Amazon CloudWatch Logs, traces in X-Ray, Application Signals services and service level objectives (SLOs), applications in Amazon CloudWatch Application Insights, and internet monitors in CloudWatch Internet Monitor.

        When you set up a link, you can choose to share the metrics from all namespaces with the monitoring account, or filter to a subset of namespaces. And for CloudWatch Logs, you can choose to share all log groups with the monitoring account, or filter to a subset of log groups.

        " } diff --git a/services/observabilityadmin/pom.xml b/services/observabilityadmin/pom.xml index b0a99ebdf287..f16598c12ff0 100644 --- a/services/observabilityadmin/pom.xml +++ b/services/observabilityadmin/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT observabilityadmin AWS Java SDK :: Services :: Observability Admin diff --git a/services/observabilityadmin/src/main/resources/codegen-resources/customization.config b/services/observabilityadmin/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/observabilityadmin/src/main/resources/codegen-resources/customization.config +++ b/services/observabilityadmin/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/odb/pom.xml b/services/odb/pom.xml new file mode 100644 index 000000000000..790f5f5e8be5 --- /dev/null +++ b/services/odb/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.76-SNAPSHOT + + odb + AWS Java SDK :: Services :: Odb + The AWS Java SDK for Odb module holds the client classes that are used for + communicating with Odb. 
+ + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.odb + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/odb/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/odb/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..5d10cff9434b --- /dev/null +++ b/services/odb/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://odb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://odb-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://odb.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://odb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/services/odb/src/main/resources/codegen-resources/endpoint-tests.json b/services/odb/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..e8896a402d84 --- /dev/null +++ b/services/odb/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": 
true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://odb.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": 
"For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://odb.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/odb/src/main/resources/codegen-resources/paginators-1.json b/services/odb/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..6bda94b16e8e --- /dev/null +++ b/services/odb/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,70 @@ +{ + "pagination": { + 
"ListAutonomousVirtualMachines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "autonomousVirtualMachines" + }, + "ListCloudAutonomousVmClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cloudAutonomousVmClusters" + }, + "ListCloudExadataInfrastructures": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cloudExadataInfrastructures" + }, + "ListCloudVmClusters": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "cloudVmClusters" + }, + "ListDbNodes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "dbNodes" + }, + "ListDbServers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "dbServers" + }, + "ListDbSystemShapes": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "dbSystemShapes" + }, + "ListGiVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "giVersions" + }, + "ListOdbNetworks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "odbNetworks" + }, + "ListOdbPeeringConnections": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "odbPeeringConnections" + }, + "ListSystemVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "systemVersions" + } + } +} diff --git a/services/odb/src/main/resources/codegen-resources/service-2.json b/services/odb/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..97f64af45b45 --- /dev/null +++ 
b/services/odb/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,5177 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-08-20", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"odb", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + "serviceFullName":"odb", + "serviceId":"odb", + "signatureVersion":"v4", + "signingName":"odb", + "targetPrefix":"Odb", + "uid":"odb-2024-08-20" + }, + "operations":{ + "AcceptMarketplaceRegistration":{ + "name":"AcceptMarketplaceRegistration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AcceptMarketplaceRegistrationInput"}, + "output":{"shape":"AcceptMarketplaceRegistrationOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Registers the Amazon Web Services Marketplace token for your Amazon Web Services account to activate your Oracle Database@Amazon Web Services subscription.

        ", + "idempotent":true + }, + "CreateCloudAutonomousVmCluster":{ + "name":"CreateCloudAutonomousVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCloudAutonomousVmClusterInput"}, + "output":{"shape":"CreateCloudAutonomousVmClusterOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Creates a new Autonomous VM cluster in the specified Exadata infrastructure.

        ", + "idempotent":true + }, + "CreateCloudExadataInfrastructure":{ + "name":"CreateCloudExadataInfrastructure", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCloudExadataInfrastructureInput"}, + "output":{"shape":"CreateCloudExadataInfrastructureOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates an Exadata infrastructure.

        ", + "idempotent":true + }, + "CreateCloudVmCluster":{ + "name":"CreateCloudVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateCloudVmClusterInput"}, + "output":{"shape":"CreateCloudVmClusterOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Creates a VM cluster on the specified Exadata infrastructure.

        ", + "idempotent":true + }, + "CreateOdbNetwork":{ + "name":"CreateOdbNetwork", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOdbNetworkInput"}, + "output":{"shape":"CreateOdbNetworkOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Creates an ODB network.

        ", + "idempotent":true + }, + "CreateOdbPeeringConnection":{ + "name":"CreateOdbPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOdbPeeringConnectionInput"}, + "output":{"shape":"CreateOdbPeeringConnectionOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Creates a peering connection between an ODB network and either another ODB network or a customer-owned VPC.

        A peering connection enables private connectivity between the networks for application-tier communication.

        ", + "idempotent":true + }, + "DeleteCloudAutonomousVmCluster":{ + "name":"DeleteCloudAutonomousVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCloudAutonomousVmClusterInput"}, + "output":{"shape":"DeleteCloudAutonomousVmClusterOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deletes an Autonomous VM cluster.

        ", + "idempotent":true + }, + "DeleteCloudExadataInfrastructure":{ + "name":"DeleteCloudExadataInfrastructure", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCloudExadataInfrastructureInput"}, + "output":{"shape":"DeleteCloudExadataInfrastructureOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deletes the specified Exadata infrastructure. Before you use this operation, make sure to delete all of the VM clusters that are hosted on this Exadata infrastructure.

        ", + "idempotent":true + }, + "DeleteCloudVmCluster":{ + "name":"DeleteCloudVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteCloudVmClusterInput"}, + "output":{"shape":"DeleteCloudVmClusterOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deletes the specified VM cluster.

        ", + "idempotent":true + }, + "DeleteOdbNetwork":{ + "name":"DeleteOdbNetwork", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOdbNetworkInput"}, + "output":{"shape":"DeleteOdbNetworkOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deletes the specified ODB network.

        ", + "idempotent":true + }, + "DeleteOdbPeeringConnection":{ + "name":"DeleteOdbPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOdbPeeringConnectionInput"}, + "output":{"shape":"DeleteOdbPeeringConnectionOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Deletes an ODB peering connection.

        When you delete an ODB peering connection, the underlying VPC peering connection is also deleted.

        ", + "idempotent":true + }, + "GetCloudAutonomousVmCluster":{ + "name":"GetCloudAutonomousVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCloudAutonomousVmClusterInput"}, + "output":{"shape":"GetCloudAutonomousVmClusterOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Gets information about a specific Autonomous VM cluster.

        " + }, + "GetCloudExadataInfrastructure":{ + "name":"GetCloudExadataInfrastructure", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCloudExadataInfrastructureInput"}, + "output":{"shape":"GetCloudExadataInfrastructureOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the specified Exadata infrastructure.

        " + }, + "GetCloudExadataInfrastructureUnallocatedResources":{ + "name":"GetCloudExadataInfrastructureUnallocatedResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCloudExadataInfrastructureUnallocatedResourcesInput"}, + "output":{"shape":"GetCloudExadataInfrastructureUnallocatedResourcesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Retrieves information about unallocated resources in a specified Cloud Exadata Infrastructure.

        " + }, + "GetCloudVmCluster":{ + "name":"GetCloudVmCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetCloudVmClusterInput"}, + "output":{"shape":"GetCloudVmClusterOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the specified VM cluster.

        " + }, + "GetDbNode":{ + "name":"GetDbNode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDbNodeInput"}, + "output":{"shape":"GetDbNodeOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the specified DB node.

        " + }, + "GetDbServer":{ + "name":"GetDbServer", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDbServerInput"}, + "output":{"shape":"GetDbServerOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the specified database server.

        " + }, + "GetOciOnboardingStatus":{ + "name":"GetOciOnboardingStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOciOnboardingStatusInput"}, + "output":{"shape":"GetOciOnboardingStatusOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns the tenancy activation link and onboarding status for your Amazon Web Services account.

        " + }, + "GetOdbNetwork":{ + "name":"GetOdbNetwork", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOdbNetworkInput"}, + "output":{"shape":"GetOdbNetworkOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the specified ODB network.

        " + }, + "GetOdbPeeringConnection":{ + "name":"GetOdbPeeringConnection", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOdbPeeringConnectionInput"}, + "output":{"shape":"GetOdbPeeringConnectionOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Retrieves information about an ODB peering connection.

        " + }, + "InitializeService":{ + "name":"InitializeService", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"InitializeServiceInput"}, + "output":{"shape":"InitializeServiceOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Initializes the ODB service for the first time in an account.

        ", + "idempotent":true + }, + "ListAutonomousVirtualMachines":{ + "name":"ListAutonomousVirtualMachines", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAutonomousVirtualMachinesInput"}, + "output":{"shape":"ListAutonomousVirtualMachinesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Lists all Autonomous VMs in an Autonomous VM cluster.

        " + }, + "ListCloudAutonomousVmClusters":{ + "name":"ListCloudAutonomousVmClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCloudAutonomousVmClustersInput"}, + "output":{"shape":"ListCloudAutonomousVmClustersOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Lists all Autonomous VM clusters in a specified Cloud Exadata infrastructure.

        " + }, + "ListCloudExadataInfrastructures":{ + "name":"ListCloudExadataInfrastructures", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCloudExadataInfrastructuresInput"}, + "output":{"shape":"ListCloudExadataInfrastructuresOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns information about the Exadata infrastructures owned by your Amazon Web Services account.

        " + }, + "ListCloudVmClusters":{ + "name":"ListCloudVmClusters", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCloudVmClustersInput"}, + "output":{"shape":"ListCloudVmClustersOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the VM clusters owned by your Amazon Web Services account or only the ones on the specified Exadata infrastructure.

        " + }, + "ListDbNodes":{ + "name":"ListDbNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDbNodesInput"}, + "output":{"shape":"ListDbNodesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the DB nodes for the specified VM cluster.

        " + }, + "ListDbServers":{ + "name":"ListDbServers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDbServersInput"}, + "output":{"shape":"ListDbServersOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the database servers that belong to the specified Exadata infrastructure.

        " + }, + "ListDbSystemShapes":{ + "name":"ListDbSystemShapes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDbSystemShapesInput"}, + "output":{"shape":"ListDbSystemShapesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns information about the shapes that are available for an Exadata infrastructure.

        " + }, + "ListGiVersions":{ + "name":"ListGiVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGiVersionsInput"}, + "output":{"shape":"ListGiVersionsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns information about Oracle Grid Infrastructure (GI) software versions that are available for a VM cluster for the specified shape.

        " + }, + "ListOdbNetworks":{ + "name":"ListOdbNetworks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOdbNetworksInput"}, + "output":{"shape":"ListOdbNetworksOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Returns information about the ODB networks owned by your Amazon Web Services account.

        " + }, + "ListOdbPeeringConnections":{ + "name":"ListOdbPeeringConnections", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOdbPeeringConnectionsInput"}, + "output":{"shape":"ListOdbPeeringConnectionsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Lists all ODB peering connections or those associated with a specific ODB network.

        " + }, + "ListSystemVersions":{ + "name":"ListSystemVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSystemVersionsInput"}, + "output":{"shape":"ListSystemVersionsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the system versions that are available for a VM cluster for the specified giVersion and shape.

        " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns information about the tags applied to this resource.

        " + }, + "RebootDbNode":{ + "name":"RebootDbNode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDbNodeInput"}, + "output":{"shape":"RebootDbNodeOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Reboots the specified DB node in a VM cluster.

        " + }, + "StartDbNode":{ + "name":"StartDbNode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartDbNodeInput"}, + "output":{"shape":"StartDbNodeOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Starts the specified DB node in a VM cluster.

        " + }, + "StopDbNode":{ + "name":"StopDbNode", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopDbNodeInput"}, + "output":{"shape":"StopDbNodeOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Stops the specified DB node in a VM cluster.

        " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Applies tags to the specified resource.

        ", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Removes tags from the specified resource.

        ", + "idempotent":true + }, + "UpdateCloudExadataInfrastructure":{ + "name":"UpdateCloudExadataInfrastructure", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateCloudExadataInfrastructureInput"}, + "output":{"shape":"UpdateCloudExadataInfrastructureOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Updates the properties of an Exadata infrastructure resource.

        " + }, + "UpdateOdbNetwork":{ + "name":"UpdateOdbNetwork", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateOdbNetworkInput"}, + "output":{"shape":"UpdateOdbNetworkOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Updates properties of a specified ODB network.

        " + } + }, + "shapes":{ + "AcceptMarketplaceRegistrationInput":{ + "type":"structure", + "required":["marketplaceRegistrationToken"], + "members":{ + "marketplaceRegistrationToken":{ + "shape":"String", + "documentation":"

        The registration token that's generated by Amazon Web Services Marketplace and sent to Oracle Database@Amazon Web Services.

        " + } + } + }, + "AcceptMarketplaceRegistrationOutput":{ + "type":"structure", + "members":{} + }, + "Access":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

        You don't have sufficient access to perform this action. Make sure you have the required permissions and try again.

        ", + "exception":true + }, + "AutonomousVirtualMachineList":{ + "type":"list", + "member":{"shape":"AutonomousVirtualMachineSummary"} + }, + "AutonomousVirtualMachineSummary":{ + "type":"structure", + "members":{ + "autonomousVirtualMachineId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Autonomous VM.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the Autonomous VM, if applicable.

        " + }, + "vmName":{ + "shape":"String", + "documentation":"

        The name of the Autonomous VM.

        " + }, + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the database server hosting this Autonomous VM.

        " + }, + "dbServerDisplayName":{ + "shape":"String", + "documentation":"

        The display name of the database server hosting this Autonomous VM.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores allocated to this Autonomous VM.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory allocated to this Autonomous VM, in gigabytes (GB).

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of storage allocated to this Autonomous Virtual Machine, in gigabytes (GB).

        " + }, + "clientIpAddress":{ + "shape":"String", + "documentation":"

        The IP address used by clients to connect to this Autonomous VM.

        " + }, + "cloudAutonomousVmClusterId":{ + "shape":"String", + "documentation":"

        The unique identifier of the Autonomous VM cluster containing this Autonomous VM.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The Oracle Cloud Identifier (OCID) of the Autonomous VM.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the Oracle Cloud Infrastructure (OCI) resource anchor associated with this Autonomous VM.

        " + } + }, + "documentation":"

        A summary of an Autonomous Virtual Machine (VM) within an Autonomous VM cluster.

        " + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CloudAutonomousVmCluster":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster.

        " + }, + "cloudAutonomousVmClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) for the Autonomous VM cluster.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network associated with this Autonomous VM cluster.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor associated with this Autonomous VM cluster.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The progress of the current operation on the Autonomous VM cluster, as a percentage.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        The display name of the Autonomous VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current state of the Autonomous VM cluster. Possible values include CREATING, AVAILABLE, UPDATING, DELETING, DELETED, FAILED.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the Autonomous VM cluster.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Cloud Exadata Infrastructure containing this Autonomous VM cluster.

        " + }, + "autonomousDataStoragePercentage":{ + "shape":"Float", + "documentation":"

        The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.

        " + }, + "autonomousDataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The data storage size allocated for Autonomous Databases in the Autonomous VM cluster, in TB.

        " + }, + "availableAutonomousDataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The available data storage space for Autonomous Databases in the Autonomous VM cluster, in TB.

        " + }, + "availableContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs that you can create with the currently available storage.

        " + }, + "availableCpus":{ + "shape":"Float", + "documentation":"

        The number of CPU cores available for allocation to Autonomous Databases.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The compute model of the Autonomous VM cluster: ECPU or OCPU.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores in the Autonomous VM cluster.

        " + }, + "cpuCoreCountPerNode":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled per node in the Autonomous VM cluster.

        " + }, + "cpuPercentage":{ + "shape":"Float", + "documentation":"

        The percentage of total CPU cores currently in use in the Autonomous VM cluster.

        " + }, + "dataStorageSizeInGBs":{ + "shape":"Double", + "documentation":"

        The total data storage allocated to the Autonomous VM cluster, in GB.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The total data storage allocated to the Autonomous VM cluster, in TB.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The local node storage allocated to the Autonomous VM cluster, in gigabytes (GB).

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers associated with the Autonomous VM cluster.

        " + }, + "description":{ + "shape":"String", + "documentation":"

        The user-provided description of the Autonomous VM cluster.

        " + }, + "domain":{ + "shape":"String", + "documentation":"

        The domain name for the Autonomous VM cluster.

        " + }, + "exadataStorageInTBsLowestScaledValue":{ + "shape":"Double", + "documentation":"

        The minimum value to which you can scale down the Exadata storage, in TB.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The hostname for the Autonomous VM cluster.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The URL for accessing the OCI console page for this Autonomous VM cluster.

        " + }, + "isMtlsEnabledVmCluster":{ + "shape":"Boolean", + "documentation":"

        Indicates whether mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model that applies to the Autonomous VM cluster. Valid values are LICENSE_INCLUDED or BRING_YOUR_OWN_LICENSE.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "maxAcdsLowestScaledValue":{ + "shape":"Integer", + "documentation":"

        The minimum value to which you can scale down the maximum number of Autonomous CDBs.

        " + }, + "memoryPerOracleComputeUnitInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory allocated per Oracle Compute Unit, in GB.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of memory allocated to the Autonomous VM cluster, in gigabytes (GB).

        " + }, + "nodeCount":{ + "shape":"Integer", + "documentation":"

        The number of database server nodes in the Autonomous VM cluster.

        " + }, + "nonProvisionableAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs that can't be provisioned because of resource constraints.

        " + }, + "provisionableAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs that can be provisioned in the Autonomous VM cluster.

        " + }, + "provisionedAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs currently provisioned in the Autonomous VM cluster.

        " + }, + "provisionedCpus":{ + "shape":"Float", + "documentation":"

        The number of CPU cores currently provisioned in the Autonomous VM cluster.

        " + }, + "reclaimableCpus":{ + "shape":"Float", + "documentation":"

        The number of CPU cores that can be reclaimed from terminated or scaled-down Autonomous Databases.

        " + }, + "reservedCpus":{ + "shape":"Float", + "documentation":"

        The number of CPU cores reserved for system operations and redundancy.

        " + }, + "scanListenerPortNonTls":{ + "shape":"Integer", + "documentation":"

        The SCAN listener port for non-TLS (TCP) protocol. The default is 1521.

        " + }, + "scanListenerPortTls":{ + "shape":"Integer", + "documentation":"

        The SCAN listener port for TLS (TCP) protocol. The default is 2484.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The shape of the Exadata infrastructure for the Autonomous VM cluster.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the Autonomous VM cluster was created.

        " + }, + "timeDatabaseSslCertificateExpires":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The expiration date and time of the database SSL certificate.

        " + }, + "timeOrdsCertificateExpires":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The expiration date and time of the Oracle REST Data Services (ORDS) certificate.

        " + }, + "timeZone":{ + "shape":"String", + "documentation":"

        The time zone of the Autonomous VM cluster.

        " + }, + "totalContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The total number of Autonomous Container Databases that can be created with the allocated local storage.

        " + } + }, + "documentation":"

        Information about an Autonomous VM cluster resource.

        " + }, + "CloudAutonomousVmClusterList":{ + "type":"list", + "member":{"shape":"CloudAutonomousVmClusterSummary"} + }, + "CloudAutonomousVmClusterResourceDetails":{ + "type":"structure", + "members":{ + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster.

        " + }, + "unallocatedAdbStorageInTBs":{ + "shape":"Double", + "documentation":"

        The amount of unallocated Autonomous Database storage in the Autonomous VM cluster, in terabytes.

        " + } + }, + "documentation":"

        Resource details of an Autonomous VM cluster.

        " + }, + "CloudAutonomousVmClusterResourceDetailsList":{ + "type":"list", + "member":{"shape":"CloudAutonomousVmClusterResourceDetails"} + }, + "CloudAutonomousVmClusterSummary":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster.

        " + }, + "cloudAutonomousVmClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) for the Autonomous VM cluster.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network associated with this Autonomous VM cluster.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor associated with this Autonomous VM cluster.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The progress of the current operation on the Autonomous VM cluster, as a percentage.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        The user-friendly name for the Autonomous VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Autonomous VM cluster.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the Autonomous VM cluster, if applicable.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure containing this Autonomous VM cluster.

        " + }, + "autonomousDataStoragePercentage":{ + "shape":"Float", + "documentation":"

        The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.

        " + }, + "autonomousDataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The total data storage allocated for Autonomous Databases in the Autonomous VM cluster, in TB.

        " + }, + "availableAutonomousDataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The available data storage for Autonomous Databases in the Autonomous VM cluster, in TB.

        " + }, + "availableContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous Container Databases that you can create with the currently available storage.

        " + }, + "availableCpus":{ + "shape":"Float", + "documentation":"

        The number of CPU cores available for allocation to Autonomous Databases.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The compute model of the Autonomous VM cluster: ECPU or OCPU.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores in the Autonomous VM cluster.

        " + }, + "cpuCoreCountPerNode":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores per node in the Autonomous VM cluster.

        " + }, + "cpuPercentage":{ + "shape":"Float", + "documentation":"

        The percentage of total CPU cores currently in use in the Autonomous VM cluster.

        " + }, + "dataStorageSizeInGBs":{ + "shape":"Double", + "documentation":"

        The total data storage allocated to the Autonomous VM cluster, in GB.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The total data storage allocated to the Autonomous VM cluster, in TB.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The local node storage allocated to the Autonomous VM cluster, in GB.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers associated with the Autonomous VM cluster.

        " + }, + "description":{ + "shape":"String", + "documentation":"

        The user-provided description of the Autonomous VM cluster.

        " + }, + "domain":{ + "shape":"String", + "documentation":"

        The domain name for the Autonomous VM cluster.

        " + }, + "exadataStorageInTBsLowestScaledValue":{ + "shape":"Double", + "documentation":"

        The lowest value to which Exadata storage can be scaled down, in TB.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The host name for the Autonomous VM cluster.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The Oracle Cloud Identifier (OCID) of the Autonomous VM cluster.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The URL for accessing the OCI console page for this Autonomous VM cluster.

        " + }, + "isMtlsEnabledVmCluster":{ + "shape":"Boolean", + "documentation":"

        Indicates if mutual TLS (mTLS) authentication is enabled for the Autonomous VM cluster.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model that applies to the Autonomous VM cluster.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "maxAcdsLowestScaledValue":{ + "shape":"Integer", + "documentation":"

        The lowest value to which you can scale down the maximum number of Autonomous CDBs.

        " + }, + "memoryPerOracleComputeUnitInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory allocated per Oracle Compute Unit (OCU), in GB.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of memory allocated to the Autonomous VM cluster, in GB.

        " + }, + "nodeCount":{ + "shape":"Integer", + "documentation":"

        The number of database server nodes in the Autonomous VM cluster.

        " + }, + "nonProvisionableAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs that can't be provisioned because of resource constraints.

        " + }, + "provisionableAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous CDBs that you can provision in the Autonomous VM cluster.

        " + }, + "provisionedAutonomousContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The number of Autonomous Container Databases currently provisioned in the Autonomous VM cluster.

        " + }, + "provisionedCpus":{ + "shape":"Float", + "documentation":"

        The number of CPUs currently provisioned in the Autonomous VM cluster.

        " + }, + "reclaimableCpus":{ + "shape":"Float", + "documentation":"

        The number of CPUs that can be reclaimed from terminated or scaled-down Autonomous Databases.

        " + }, + "reservedCpus":{ + "shape":"Float", + "documentation":"

        The number of CPUs reserved for system operations and redundancy.

        " + }, + "scanListenerPortNonTls":{ + "shape":"Integer", + "documentation":"

        The SCAN listener port for non-TLS (TCP) protocol.

        " + }, + "scanListenerPortTls":{ + "shape":"Integer", + "documentation":"

        The SCAN listener port for TLS (TCP) protocol.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The shape of the Exadata infrastructure for the Autonomous VM cluster.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the Autonomous VM cluster was created.

        " + }, + "timeDatabaseSslCertificateExpires":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The expiration date and time of the database SSL certificate.

        " + }, + "timeOrdsCertificateExpires":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The expiration date and time of the Oracle REST Data Services (ORDS) certificate.

        " + }, + "timeZone":{ + "shape":"String", + "documentation":"

        The time zone of the Autonomous VM cluster.

        " + }, + "totalContainerDatabases":{ + "shape":"Integer", + "documentation":"

        The total number of Autonomous Container Databases that can be created in the Autonomous VM cluster.

        " + } + }, + "documentation":"

        A summary of an Autonomous VM cluster.

        " + }, + "CloudExadataInfrastructure":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier for the Exadata infrastructure.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the Exadata infrastructure.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Exadata infrastructure.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the Exadata infrastructure.

        " + }, + "cloudExadataInfrastructureArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) for the Exadata infrastructure.

        " + }, + "activatedStorageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers requested for the Exadata infrastructure.

        " + }, + "additionalStorageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers requested for the Exadata infrastructure.

        " + }, + "availableStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of available storage, in gigabytes (GB), for the Exadata infrastructure.

        " + }, + "availabilityZone":{ + "shape":"String", + "documentation":"

        The name of the Availability Zone (AZ) where the Exadata infrastructure is located.

        " + }, + "availabilityZoneId":{ + "shape":"String", + "documentation":"

        The AZ ID of the AZ where the Exadata infrastructure is located.

        " + }, + "computeCount":{ + "shape":"Integer", + "documentation":"

        The number of database servers for the Exadata infrastructure.

        " + }, + "cpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores that are allocated to the Exadata infrastructure.

        " + }, + "customerContactsToSendToOCI":{ + "shape":"CustomerContacts", + "documentation":"

        The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The size of the Exadata infrastructure's data disk group, in terabytes (TB).

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The size of the Exadata infrastructure's local node storage, in gigabytes (GB).

        " + }, + "dbServerVersion":{ + "shape":"String", + "documentation":"

        The software version of the database servers (dom0) in the Exadata infrastructure.

        " + }, + "lastMaintenanceRunId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "maxCpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores available on the Exadata infrastructure.

        " + }, + "maxDataStorageInTBs":{ + "shape":"Double", + "documentation":"

        The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure.

        " + }, + "maxDbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure.

        " + }, + "maxMemoryInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure.

        " + }, + "monthlyDbServerVersion":{ + "shape":"String", + "documentation":"

        The monthly software version of the database servers installed on the Exadata infrastructure.

        " + }, + "monthlyStorageServerVersion":{ + "shape":"String", + "documentation":"

        The monthly software version of the storage servers installed on the Exadata infrastructure.

        " + }, + "nextMaintenanceRunId":{ + "shape":"String", + "documentation":"

        The OCID of the next maintenance run for the Exadata infrastructure.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the Exadata infrastructure.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The HTTPS link to the Exadata infrastructure in OCI.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the Exadata infrastructure.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The model name of the Exadata infrastructure.

        " + }, + "storageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers that are activated for the Exadata infrastructure.

        " + }, + "storageServerVersion":{ + "shape":"String", + "documentation":"

        The software version of the storage servers on the Exadata infrastructure.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the Exadata infrastructure was created.

        " + }, + "totalStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of storage, in gigabytes (GB), on the Exadata infrastructure.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage.

        " + }, + "databaseServerType":{ + "shape":"String", + "documentation":"

        The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "storageServerType":{ + "shape":"String", + "documentation":"

        The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + } + }, + "documentation":"

        Information about an Exadata infrastructure.

        " + }, + "CloudExadataInfrastructureList":{ + "type":"list", + "member":{"shape":"CloudExadataInfrastructureSummary"} + }, + "CloudExadataInfrastructureSummary":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier for the Exadata infrastructure.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the Exadata infrastructure.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Exadata infrastructure.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the Exadata infrastructure.

        " + }, + "cloudExadataInfrastructureArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) for the Exadata infrastructure.

        " + }, + "activatedStorageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers requested for the Exadata infrastructure.

        " + }, + "additionalStorageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers requested for the Exadata infrastructure.

        " + }, + "availableStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of available storage, in gigabytes (GB), for the Exadata infrastructure.

        " + }, + "availabilityZone":{ + "shape":"String", + "documentation":"

        The name of the Availability Zone (AZ) where the Exadata infrastructure is located.

        " + }, + "availabilityZoneId":{ + "shape":"String", + "documentation":"

        The AZ ID of the AZ where the Exadata infrastructure is located.

        " + }, + "computeCount":{ + "shape":"Integer", + "documentation":"

        The number of database servers for the Exadata infrastructure.

        " + }, + "cpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores that are allocated to the Exadata infrastructure.

        " + }, + "customerContactsToSendToOCI":{ + "shape":"CustomerContacts", + "documentation":"

        The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The size of the Exadata infrastructure's data disk group, in terabytes (TB).

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The size of the Exadata infrastructure's local node storage, in gigabytes (GB).

        " + }, + "dbServerVersion":{ + "shape":"String", + "documentation":"

        The software version of the database servers on the Exadata infrastructure.

        " + }, + "lastMaintenanceRunId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud Identifier (OCID) of the last maintenance run for the Exadata infrastructure.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "maxCpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores available on the Exadata infrastructure.

        " + }, + "maxDataStorageInTBs":{ + "shape":"Double", + "documentation":"

        The total amount of data disk group storage, in terabytes (TB), that's available on the Exadata infrastructure.

        " + }, + "maxDbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of local node storage, in gigabytes (GB), that's available on the Exadata infrastructure.

        " + }, + "maxMemoryInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of memory, in gigabytes (GB), that's available on the Exadata infrastructure.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated on the Exadata infrastructure.

        " + }, + "monthlyDbServerVersion":{ + "shape":"String", + "documentation":"

        The monthly software version of the database servers (dom0) installed on the Exadata infrastructure.

        " + }, + "monthlyStorageServerVersion":{ + "shape":"String", + "documentation":"

        The monthly software version of the storage servers installed on the Exadata infrastructure.

        " + }, + "nextMaintenanceRunId":{ + "shape":"String", + "documentation":"

        The OCID of the next maintenance run for the Exadata infrastructure.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the Exadata infrastructure.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The HTTPS link to the Exadata infrastructure in OCI.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the Exadata infrastructure.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The model name of the Exadata infrastructure.

        " + }, + "storageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers that are activated for the Exadata infrastructure.

        " + }, + "storageServerVersion":{ + "shape":"String", + "documentation":"

        The software version of the storage servers on the Exadata infrastructure.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the Exadata infrastructure was created.

        " + }, + "totalStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of storage, in gigabytes (GB), on the Exadata infrastructure.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the Exadata infrastructure, expressed as a percentage.

        " + }, + "databaseServerType":{ + "shape":"String", + "documentation":"

        The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "storageServerType":{ + "shape":"String", + "documentation":"

        The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + } + }, + "documentation":"

        Information about an Exadata infrastructure.

        " + }, + "CloudExadataInfrastructureUnallocatedResources":{ + "type":"structure", + "members":{ + "cloudAutonomousVmClusters":{ + "shape":"CloudAutonomousVmClusterResourceDetailsList", + "documentation":"

        A list of Autonomous VM clusters associated with this Cloud Exadata Infrastructure.

        " + }, + "cloudExadataInfrastructureDisplayName":{ + "shape":"String", + "documentation":"

        The display name of the Cloud Exadata infrastructure.

        " + }, + "exadataStorageInTBs":{ + "shape":"Double", + "documentation":"

        The amount of unallocated Exadata storage available, in terabytes (TB).

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Cloud Exadata infrastructure.

        " + }, + "localStorageInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of unallocated local storage available, in gigabytes (GB).

        " + }, + "memoryInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of unallocated memory available, in gigabytes (GB).

        " + }, + "ocpus":{ + "shape":"Integer", + "documentation":"

        The number of unallocated Oracle CPU Units (OCPUs) available.

        " + } + }, + "documentation":"

        Information about unallocated resources in the Cloud Exadata infrastructure.

        " + }, + "CloudVmCluster":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the VM cluster.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the VM cluster.

        " + }, + "cloudVmClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the VM cluster.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The unique identifier of the Exadata infrastructure that this VM cluster belongs to.

        " + }, + "clusterName":{ + "shape":"String", + "documentation":"

        The name of the Grid Infrastructure (GI) cluster.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled on the VM cluster.

        " + }, + "dataCollectionOptions":{ + "shape":"DataCollectionOptions", + "documentation":"

        The set of diagnostic collection options enabled for the VM cluster.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers for the VM cluster.

        " + }, + "diskRedundancy":{ + "shape":"DiskRedundancy", + "documentation":"

        The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy.

        " + }, + "giVersion":{ + "shape":"String", + "documentation":"

        The software version of the Oracle Grid Infrastructure (GI) for the VM cluster.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The host name for the VM cluster.

        " + }, + "iormConfigCache":{ + "shape":"ExadataIormConfig", + "documentation":"

        The ExadataIormConfig cache details for the VM cluster.

        " + }, + "isLocalBackupEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether database backups to local Exadata storage are enabled for the VM cluster.

        " + }, + "isSparseDiskgroupEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether the VM cluster is configured with a sparse disk group.

        " + }, + "lastUpdateHistoryEntryId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud ID (OCID) of the last maintenance update history entry.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model applied to the VM cluster.

        " + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

        The port number configured for the listener on the VM cluster.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated for the VM cluster.

        " + }, + "nodeCount":{ + "shape":"Integer", + "documentation":"

        The number of nodes in the VM cluster.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the VM cluster.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the VM cluster.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The HTTPS link to the VM cluster in OCI.

        " + }, + "domain":{ + "shape":"String", + "documentation":"

        The domain of the VM cluster.

        " + }, + "scanDnsName":{ + "shape":"String", + "documentation":"

        The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster.

        " + }, + "scanDnsRecordId":{ + "shape":"String", + "documentation":"

        The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster.

        " + }, + "scanIpIds":{ + "shape":"StringList", + "documentation":"

        The OCID of the SCAN IP addresses that are associated with the VM cluster.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The hardware model name of the Exadata infrastructure that's running the VM cluster.

        " + }, + "sshPublicKeys":{ + "shape":"SensitiveStringList", + "documentation":"

        The public key portion of one or more key pairs used for SSH access to the VM cluster.

        " + }, + "storageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster.

        " + }, + "systemVersion":{ + "shape":"String", + "documentation":"

        The operating system version of the image chosen for the VM cluster.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the VM cluster was created.

        " + }, + "timeZone":{ + "shape":"String", + "documentation":"

        The time zone of the VM cluster.

        " + }, + "vipIds":{ + "shape":"StringList", + "documentation":"

        The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network for the VM cluster.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the VM cluster, expressed as a percentage.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + } + }, + "documentation":"

        Information about a VM cluster.

        " + }, + "CloudVmClusterList":{ + "type":"list", + "member":{"shape":"CloudVmClusterSummary"} + }, + "CloudVmClusterSummary":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the VM cluster.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the VM cluster.

        " + }, + "cloudVmClusterArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the VM cluster.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The unique identifier of the Exadata infrastructure that this VM cluster belongs to.

        " + }, + "clusterName":{ + "shape":"String", + "documentation":"

        The name of the Grid Infrastructure (GI) cluster.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled on the VM cluster.

        " + }, + "dataCollectionOptions":{"shape":"DataCollectionOptions"}, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The size of the data disk group, in terabytes (TB), that's allocated for the VM cluster.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated for the VM cluster.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers for the VM cluster.

        " + }, + "diskRedundancy":{ + "shape":"DiskRedundancy", + "documentation":"

        The type of redundancy configured for the VM cluster. NORMAL is 2-way redundancy. HIGH is 3-way redundancy.

        " + }, + "giVersion":{ + "shape":"String", + "documentation":"

        The software version of the Oracle Grid Infrastructure (GI) for the VM cluster.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The host name for the VM cluster.

        " + }, + "iormConfigCache":{"shape":"ExadataIormConfig"}, + "isLocalBackupEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether database backups to local Exadata storage are enabled for the VM cluster.

        " + }, + "isSparseDiskgroupEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether the VM cluster is configured with a sparse disk group.

        " + }, + "lastUpdateHistoryEntryId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud ID (OCID) of the last maintenance update history entry.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model applied to the VM cluster.

        " + }, + "listenerPort":{ + "shape":"Integer", + "documentation":"

        The port number configured for the listener on the VM cluster.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated for the VM cluster.

        " + }, + "nodeCount":{ + "shape":"Integer", + "documentation":"

        The number of nodes in the VM cluster.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the VM cluster.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the VM cluster.

        " + }, + "ociUrl":{ + "shape":"String", + "documentation":"

        The HTTPS link to the VM cluster in OCI.

        " + }, + "domain":{ + "shape":"String", + "documentation":"

        The domain of the VM cluster.

        " + }, + "scanDnsName":{ + "shape":"String", + "documentation":"

        The FQDN of the DNS record for the Single Client Access Name (SCAN) IP addresses that are associated with the VM cluster.

        " + }, + "scanDnsRecordId":{ + "shape":"String", + "documentation":"

        The OCID of the DNS record for the SCAN IP addresses that are associated with the VM cluster.

        " + }, + "scanIpIds":{ + "shape":"StringList", + "documentation":"

        The OCID of the SCAN IP addresses that are associated with the VM cluster.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The hardware model name of the Exadata infrastructure that's running the VM cluster.

        " + }, + "sshPublicKeys":{ + "shape":"SensitiveStringList", + "documentation":"

        The public key portion of one or more key pairs used for SSH access to the VM cluster.

        " + }, + "storageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated to the VM cluster.

        " + }, + "systemVersion":{ + "shape":"String", + "documentation":"

        The operating system version of the image chosen for the VM cluster.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the VM cluster was created.

        " + }, + "timeZone":{ + "shape":"String", + "documentation":"

        The time zone of the VM cluster.

        " + }, + "vipIds":{ + "shape":"StringList", + "documentation":"

        The virtual IP (VIP) addresses that are associated with the VM cluster. Oracle's Cluster Ready Services (CRS) creates and maintains one VIP address for each node in the VM cluster to enable failover. If one node fails, the VIP is reassigned to another active node in the cluster.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network for the VM cluster.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the VM cluster, expressed as a percentage.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + } + }, + "documentation":"

        Information about a VM cluster.

        " + }, + "ComputeModel":{ + "type":"string", + "enum":[ + "ECPU", + "OCPU" + ] + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

        The identifier of the resource that caused the conflict.

        " + }, + "resourceType":{ + "shape":"String", + "documentation":"

        The type of resource that caused the conflict.

        " + } + }, + "documentation":"

        Occurs when there is a conflict with the current status of your resource. Fix any inconsistencies with your resource and try again.

        ", + "exception":true + }, + "CreateCloudAutonomousVmClusterInput":{ + "type":"structure", + "required":[ + "cloudExadataInfrastructureId", + "odbNetworkId", + "displayName", + "autonomousDataStorageSizeInTBs", + "cpuCoreCountPerNode", + "memoryPerOracleComputeUnitInGBs", + "totalContainerDatabases" + ], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure where the VM cluster will be created.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network to be used for the VM cluster.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        The display name for the Autonomous VM cluster. The name does not need to be unique.

        " + }, + "clientToken":{ + "shape":"CreateCloudAutonomousVmClusterInputClientTokenString", + "documentation":"

        A client-provided token to ensure idempotency of the request.

        ", + "idempotencyToken":true + }, + "autonomousDataStorageSizeInTBs":{ + "shape":"CreateCloudAutonomousVmClusterInputAutonomousDataStorageSizeInTBsDouble", + "documentation":"

        The data disk group size to be allocated for Autonomous Databases, in terabytes (TB).

        " + }, + "cpuCoreCountPerNode":{ + "shape":"CreateCloudAutonomousVmClusterInputCpuCoreCountPerNodeInteger", + "documentation":"

        The number of CPU cores to be enabled per VM cluster node.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers to be used for the Autonomous VM cluster.

        " + }, + "description":{ + "shape":"CreateCloudAutonomousVmClusterInputDescriptionString", + "documentation":"

        A user-provided description of the Autonomous VM cluster.

        " + }, + "isMtlsEnabledVmCluster":{ + "shape":"Boolean", + "documentation":"

        Specifies whether to enable mutual TLS (mTLS) authentication for the Autonomous VM cluster.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model to apply to the Autonomous VM cluster.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "memoryPerOracleComputeUnitInGBs":{ + "shape":"CreateCloudAutonomousVmClusterInputMemoryPerOracleComputeUnitInGBsInteger", + "documentation":"

        The amount of memory to be allocated per OCPU, in GB.

        " + }, + "scanListenerPortNonTls":{ + "shape":"CreateCloudAutonomousVmClusterInputScanListenerPortNonTlsInteger", + "documentation":"

        The SCAN listener port for non-TLS (TCP) protocol.

        " + }, + "scanListenerPortTls":{ + "shape":"CreateCloudAutonomousVmClusterInputScanListenerPortTlsInteger", + "documentation":"

        The SCAN listener port for TLS (TCP) protocol.

        " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        Free-form tags for this resource. Each tag is a key-value pair with no predefined name, type, or namespace.

        " + }, + "timeZone":{ + "shape":"CreateCloudAutonomousVmClusterInputTimeZoneString", + "documentation":"

        The time zone to use for the Autonomous VM cluster.

        " + }, + "totalContainerDatabases":{ + "shape":"CreateCloudAutonomousVmClusterInputTotalContainerDatabasesInteger", + "documentation":"

        The total number of Autonomous CDBs that you can create in the Autonomous VM cluster.

        " + } + } + }, + "CreateCloudAutonomousVmClusterInputAutonomousDataStorageSizeInTBsDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "CreateCloudAutonomousVmClusterInputClientTokenString":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudAutonomousVmClusterInputCpuCoreCountPerNodeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "CreateCloudAutonomousVmClusterInputDescriptionString":{ + "type":"string", + "max":400, + "min":1 + }, + "CreateCloudAutonomousVmClusterInputMemoryPerOracleComputeUnitInGBsInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "CreateCloudAutonomousVmClusterInputScanListenerPortNonTlsInteger":{ + "type":"integer", + "box":true, + "max":8999, + "min":1024 + }, + "CreateCloudAutonomousVmClusterInputScanListenerPortTlsInteger":{ + "type":"integer", + "box":true, + "max":8999, + "min":1024 + }, + "CreateCloudAutonomousVmClusterInputTimeZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudAutonomousVmClusterInputTotalContainerDatabasesInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "CreateCloudAutonomousVmClusterOutput":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The display name of the created Autonomous VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Autonomous VM cluster creation process.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the Autonomous VM cluster creation process, if applicable.

        " + }, + "cloudAutonomousVmClusterId":{ + "shape":"String", + "documentation":"

        The unique identifier of the created Autonomous VM cluster.

        " + } + } + }, + "CreateCloudExadataInfrastructureInput":{ + "type":"structure", + "required":[ + "displayName", + "shape", + "computeCount", + "storageCount" + ], + "members":{ + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        A user-friendly name for the Exadata infrastructure.

        " + }, + "shape":{ + "shape":"CreateCloudExadataInfrastructureInputShapeString", + "documentation":"

        The model name of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "availabilityZone":{ + "shape":"CreateCloudExadataInfrastructureInputAvailabilityZoneString", + "documentation":"

        The name of the Availability Zone (AZ) where the Exadata infrastructure is located.

        This operation requires that you specify a value for either availabilityZone or availabilityZoneId.

        Example: us-east-1a

        " + }, + "availabilityZoneId":{ + "shape":"CreateCloudExadataInfrastructureInputAvailabilityZoneIdString", + "documentation":"

        The AZ ID of the AZ where the Exadata infrastructure is located.

        This operation requires that you specify a value for either availabilityZone or availabilityZoneId.

        Example: use1-az1

        " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        The list of resource tags to apply to the Exadata infrastructure.

        " + }, + "computeCount":{ + "shape":"Integer", + "documentation":"

        The number of database servers for the Exadata infrastructure. Valid values for this parameter depend on the shape. To get information about the minimum and maximum values, use the ListDbSystemShapes operation.

        " + }, + "customerContactsToSendToOCI":{ + "shape":"CustomerContacts", + "documentation":"

        The email addresses of contacts to receive notification from Oracle about maintenance updates for the Exadata infrastructure.

        " + }, + "maintenanceWindow":{ + "shape":"MaintenanceWindow", + "documentation":"

        The maintenance window configuration for the Exadata Cloud infrastructure.

        This allows you to define when maintenance operations such as patching and updates can be performed on the infrastructure.

        " + }, + "storageCount":{ + "shape":"Integer", + "documentation":"

        The number of storage servers to activate for this Exadata infrastructure. Valid values for this parameter depend on the shape. To get information about the minimum and maximum values, use the ListDbSystemShapes operation.

        " + }, + "clientToken":{ + "shape":"CreateCloudExadataInfrastructureInputClientTokenString", + "documentation":"

        A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you don't specify a client token, the Amazon Web Services SDK automatically generates a client token and uses it for the request to ensure idempotency. The client token is valid for up to 24 hours after it's first used.

        ", + "idempotencyToken":true + }, + "databaseServerType":{ + "shape":"CreateCloudExadataInfrastructureInputDatabaseServerTypeString", + "documentation":"

        The database server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + }, + "storageServerType":{ + "shape":"CreateCloudExadataInfrastructureInputStorageServerTypeString", + "documentation":"

        The storage server model type of the Exadata infrastructure. For the list of valid model names, use the ListDbSystemShapes operation.

        " + } + } + }, + "CreateCloudExadataInfrastructureInputAvailabilityZoneIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudExadataInfrastructureInputAvailabilityZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudExadataInfrastructureInputClientTokenString":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudExadataInfrastructureInputDatabaseServerTypeString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudExadataInfrastructureInputShapeString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudExadataInfrastructureInputStorageServerTypeString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudExadataInfrastructureOutput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the Exadata infrastructure.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Exadata infrastructure.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the Exadata infrastructure.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The unique identifier of the Exadata infrastructure.

        " + } + } + }, + "CreateCloudVmClusterInput":{ + "type":"structure", + "required":[ + "cloudExadataInfrastructureId", + "cpuCoreCount", + "displayName", + "giVersion", + "hostname", + "sshPublicKeys", + "odbNetworkId" + ], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure for this VM cluster.

        " + }, + "cpuCoreCount":{ + "shape":"CreateCloudVmClusterInputCpuCoreCountInteger", + "documentation":"

        The number of CPU cores to enable on the VM cluster.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        A user-friendly name for the VM cluster.

        " + }, + "giVersion":{ + "shape":"CreateCloudVmClusterInputGiVersionString", + "documentation":"

        A valid software version of Oracle Grid Infrastructure (GI). To get the list of valid values, use the ListGiVersions operation and specify the shape of the Exadata infrastructure.

        Example: 19.0.0.0

        " + }, + "hostname":{ + "shape":"CreateCloudVmClusterInputHostnameString", + "documentation":"

        The host name for the VM cluster.

        Constraints:

        • Can't be \"localhost\" or \"hostname\".

        • Can't contain \"-version\".

        • The maximum length of the combined hostname and domain is 63 characters.

        • The hostname must be unique within the subnet.

        " + }, + "sshPublicKeys":{ + "shape":"StringList", + "documentation":"

        The public key portion of one or more key pairs used for SSH access to the VM cluster.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network for the VM cluster.

        " + }, + "clusterName":{ + "shape":"CreateCloudVmClusterInputClusterNameString", + "documentation":"

        A name for the Grid Infrastructure cluster. The name isn't case sensitive.

        " + }, + "dataCollectionOptions":{ + "shape":"DataCollectionOptions", + "documentation":"

        The set of preferences for the various diagnostic collection options for the VM cluster.

        " + }, + "dataStorageSizeInTBs":{ + "shape":"Double", + "documentation":"

        The size of the data disk group, in terabytes (TBs), to allocate for the VM cluster.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GBs), to allocate for the VM cluster.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The list of database servers for the VM cluster.

        " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        The list of resource tags to apply to the VM cluster.

        " + }, + "isLocalBackupEnabled":{ + "shape":"Boolean", + "documentation":"

        Specifies whether to enable database backups to local Exadata storage for the VM cluster.

        " + }, + "isSparseDiskgroupEnabled":{ + "shape":"Boolean", + "documentation":"

        Specifies whether to create a sparse disk group for the VM cluster.

        " + }, + "licenseModel":{ + "shape":"LicenseModel", + "documentation":"

        The Oracle license model to apply to the VM cluster.

        Default: LICENSE_INCLUDED

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GBs), to allocate for the VM cluster.

        " + }, + "systemVersion":{ + "shape":"CreateCloudVmClusterInputSystemVersionString", + "documentation":"

        The version of the operating system of the image for the VM cluster.

        " + }, + "timeZone":{ + "shape":"CreateCloudVmClusterInputTimeZoneString", + "documentation":"

        The time zone for the VM cluster. For a list of valid values for time zone, you can check the options in the console.

        Default: UTC

        " + }, + "clientToken":{ + "shape":"CreateCloudVmClusterInputClientTokenString", + "documentation":"

        A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you don't specify a client token, the Amazon Web Services SDK automatically generates a client token and uses it for the request to ensure idempotency. The client token is valid for up to 24 hours after it's first used.

        ", + "idempotencyToken":true + }, + "scanListenerPortTcp":{ + "shape":"CreateCloudVmClusterInputScanListenerPortTcpInteger", + "documentation":"

        The port number for TCP connections to the single client access name (SCAN) listener.

        Valid values: 1024–8999 with the following exceptions: 2484, 6100, 6200, 7060, 7070, 7085, and 7879

        Default: 1521

        " + } + } + }, + "CreateCloudVmClusterInputClientTokenString":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateCloudVmClusterInputClusterNameString":{ + "type":"string", + "max":11, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9-]*" + }, + "CreateCloudVmClusterInputCpuCoreCountInteger":{ + "type":"integer", + "box":true, + "max":368, + "min":0 + }, + "CreateCloudVmClusterInputGiVersionString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudVmClusterInputHostnameString":{ + "type":"string", + "max":12, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9-]*[a-zA-Z0-9]" + }, + "CreateCloudVmClusterInputScanListenerPortTcpInteger":{ + "type":"integer", + "box":true, + "max":8999, + "min":1024 + }, + "CreateCloudVmClusterInputSystemVersionString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudVmClusterInputTimeZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateCloudVmClusterOutput":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name for the VM cluster.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the VM cluster.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the VM cluster.

        " + }, + "cloudVmClusterId":{ + "shape":"String", + "documentation":"

        The unique identifier for the VM cluster.

        " + } + } + }, + "CreateOdbNetworkInput":{ + "type":"structure", + "required":[ + "displayName", + "clientSubnetCidr" + ], + "members":{ + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        A user-friendly name for the ODB network.

        " + }, + "availabilityZone":{ + "shape":"CreateOdbNetworkInputAvailabilityZoneString", + "documentation":"

        The Amazon Web Services Availability Zone (AZ) where the ODB network is located.

        This operation requires that you specify a value for either availabilityZone or availabilityZoneId.

        " + }, + "availabilityZoneId":{ + "shape":"CreateOdbNetworkInputAvailabilityZoneIdString", + "documentation":"

        The AZ ID of the AZ where the ODB network is located.

        This operation requires that you specify a value for either availabilityZone or availabilityZoneId.

        " + }, + "clientSubnetCidr":{ + "shape":"CreateOdbNetworkInputClientSubnetCidrString", + "documentation":"

        The CIDR range of the client subnet for the ODB network.

        Constraints:

        • Must not overlap with the CIDR range of the backup subnet.

        • Must not overlap with the CIDR ranges of the VPCs that are connected to the ODB network.

        • Must not use the following CIDR ranges that are reserved by OCI:

          • 100.106.0.0/16 and 100.107.0.0/16

          • 169.254.0.0/16

          • 224.0.0.0 - 239.255.255.255

          • 240.0.0.0 - 255.255.255.255

        " + }, + "backupSubnetCidr":{ + "shape":"CreateOdbNetworkInputBackupSubnetCidrString", + "documentation":"

        The CIDR range of the backup subnet for the ODB network.

        Constraints:

        • Must not overlap with the CIDR range of the client subnet.

        • Must not overlap with the CIDR ranges of the VPCs that are connected to the ODB network.

        • Must not use the following CIDR ranges that are reserved by OCI:

          • 100.106.0.0/16 and 100.107.0.0/16

          • 169.254.0.0/16

          • 224.0.0.0 - 239.255.255.255

          • 240.0.0.0 - 255.255.255.255

        " + }, + "customDomainName":{ + "shape":"CreateOdbNetworkInputCustomDomainNameString", + "documentation":"

        The domain name to use for the resources in the ODB network.

        " + }, + "defaultDnsPrefix":{ + "shape":"CreateOdbNetworkInputDefaultDnsPrefixString", + "documentation":"

        The DNS prefix to the default DNS domain name. The default DNS domain name is oraclevcn.com.

        " + }, + "clientToken":{ + "shape":"CreateOdbNetworkInputClientTokenString", + "documentation":"

        A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you don't specify a client token, the Amazon Web Services SDK automatically generates a client token and uses it for the request to ensure idempotency. The client token is valid for up to 24 hours after it's first used.

        ", + "idempotencyToken":true + }, + "s3Access":{ + "shape":"Access", + "documentation":"

        Specifies the configuration for Amazon S3 access from the ODB network.

        " + }, + "zeroEtlAccess":{ + "shape":"Access", + "documentation":"

        Specifies the configuration for Zero-ETL access from the ODB network.

        " + }, + "s3PolicyDocument":{ + "shape":"PolicyDocument", + "documentation":"

        Specifies the endpoint policy for Amazon S3 access from the ODB network.

        " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        The list of resource tags to apply to the ODB network.

        " + } + } + }, + "CreateOdbNetworkInputAvailabilityZoneIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateOdbNetworkInputAvailabilityZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateOdbNetworkInputBackupSubnetCidrString":{ + "type":"string", + "max":43, + "min":1 + }, + "CreateOdbNetworkInputClientSubnetCidrString":{ + "type":"string", + "max":43, + "min":1 + }, + "CreateOdbNetworkInputClientTokenString":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateOdbNetworkInputCustomDomainNameString":{ + "type":"string", + "max":255, + "min":1 + }, + "CreateOdbNetworkInputDefaultDnsPrefixString":{ + "type":"string", + "max":15, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*" + }, + "CreateOdbNetworkOutput":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the ODB network.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the ODB network.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the ODB network.

        " + }, + "odbNetworkId":{ + "shape":"String", + "documentation":"

        The unique identifier of the ODB network.

        " + } + } + }, + "CreateOdbPeeringConnectionInput":{ + "type":"structure", + "required":[ + "odbNetworkId", + "peerNetworkId" + ], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network that initiates the peering connection.

        " + }, + "peerNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the peer network. This can be either a VPC ID or another ODB network ID.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        The display name for the ODB peering connection.

        " + }, + "clientToken":{ + "shape":"CreateOdbPeeringConnectionInputClientTokenString", + "documentation":"

        The client token for the ODB peering connection request.

        Constraints:

        • Must be unique for each request.

        ", + "idempotencyToken":true + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        The tags to assign to the ODB peering connection.

        " + } + } + }, + "CreateOdbPeeringConnectionInputClientTokenString":{ + "type":"string", + "max":64, + "min":8, + "pattern":"[a-zA-Z0-9_\\/.=-]+" + }, + "CreateOdbPeeringConnectionOutput":{ + "type":"structure", + "required":["odbPeeringConnectionId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The display name of the ODB peering connection.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The status of the ODB peering connection.

        Valid Values: provisioning | active | terminating | terminated | failed

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the ODB peering connection.

        " + }, + "odbPeeringConnectionId":{ + "shape":"String", + "documentation":"

        The unique identifier of the ODB peering connection.

        " + } + } + }, + "CustomerContact":{ + "type":"structure", + "members":{ + "email":{ + "shape":"CustomerContactEmailString", + "documentation":"

        The email address of the contact.

        " + } + }, + "documentation":"

        A contact to receive notification from Oracle about maintenance updates for a specific Exadata infrastructure.

        " + }, + "CustomerContactEmailString":{ + "type":"string", + "max":320, + "min":1, + "sensitive":true + }, + "CustomerContacts":{ + "type":"list", + "member":{"shape":"CustomerContact"} + }, + "DataCollectionOptions":{ + "type":"structure", + "members":{ + "isDiagnosticsEventsEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether diagnostic collection is enabled for the VM cluster.

        " + }, + "isHealthMonitoringEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether health monitoring is enabled for the VM cluster.

        " + }, + "isIncidentLogsEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether incident logs are enabled for the cloud VM cluster.

        " + } + }, + "documentation":"

        Information about the data collection options enabled for a VM cluster.

        " + }, + "DayOfWeek":{ + "type":"structure", + "members":{ + "name":{ + "shape":"DayOfWeekName", + "documentation":"

        The name of the day of the week.

        " + } + }, + "documentation":"

        An enumeration of days of the week used for scheduling maintenance windows.

        " + }, + "DayOfWeekName":{ + "type":"string", + "enum":[ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] + }, + "DaysOfWeek":{ + "type":"list", + "member":{"shape":"DayOfWeek"} + }, + "DbIormConfig":{ + "type":"structure", + "members":{ + "dbName":{ + "shape":"String", + "documentation":"

        The database name. For the default DbPlan, the dbName is default.

        " + }, + "flashCacheLimit":{ + "shape":"String", + "documentation":"

        The flash cache limit for this database. This value is internally configured based on the share value assigned to the database.

        " + }, + "share":{ + "shape":"Integer", + "documentation":"

        The relative priority of this database.

        " + } + }, + "documentation":"

        The IORM configuration settings for the database.

        " + }, + "DbIormConfigList":{ + "type":"list", + "member":{"shape":"DbIormConfig"} + }, + "DbNode":{ + "type":"structure", + "members":{ + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node.

        " + }, + "dbNodeArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the DB node.

        " + }, + "status":{ + "shape":"DbNodeResourceStatus", + "documentation":"

        The current status of the DB node.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the DB node.

        " + }, + "additionalDetails":{ + "shape":"String", + "documentation":"

        Additional information about the planned maintenance.

        " + }, + "backupIpId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node.

        " + }, + "backupVnic2Id":{ + "shape":"String", + "documentation":"

        The OCID of the second backup VNIC.

        " + }, + "backupVnicId":{ + "shape":"String", + "documentation":"

        The OCID of the backup VNIC.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        Number of CPU cores enabled on the DB node.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GBs), that's allocated on the DB node.

        " + }, + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the database server that's associated with the DB node.

        " + }, + "dbSystemId":{ + "shape":"String", + "documentation":"

        The OCID of the DB system.

        " + }, + "faultDomain":{ + "shape":"String", + "documentation":"

        The name of the fault domain the instance is contained in.

        " + }, + "hostIpId":{ + "shape":"String", + "documentation":"

        The OCID of the host IP address that's associated with the DB node.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The host name for the DB node.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the DB node.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the DB node.

        " + }, + "maintenanceType":{ + "shape":"DbNodeMaintenanceType", + "documentation":"

        The type of database node maintenance. Either VMDB_REBOOT_MIGRATION or EXADBXS_REBOOT_MIGRATION.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The allocated memory in GBs on the DB node.

        " + }, + "softwareStorageSizeInGB":{ + "shape":"Integer", + "documentation":"

        The size (in GB) of the block storage volume allocation for the DB system.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the DB node was created.

        " + }, + "timeMaintenanceWindowEnd":{ + "shape":"String", + "documentation":"

        End date and time of maintenance window.

        " + }, + "timeMaintenanceWindowStart":{ + "shape":"String", + "documentation":"

        Start date and time of maintenance window.

        " + }, + "totalCpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores reserved on the DB node.

        " + }, + "vnic2Id":{ + "shape":"String", + "documentation":"

        The OCID of the second VNIC.

        " + }, + "vnicId":{ + "shape":"String", + "documentation":"

        The OCID of the VNIC.

        " + }, + "privateIpAddress":{ + "shape":"String", + "documentation":"

        The private IP address assigned to the DB node.

        " + }, + "floatingIpAddress":{ + "shape":"String", + "documentation":"

        The floating IP address assigned to the DB node.

        " + } + }, + "documentation":"

        Information about a DB node.

        " + }, + "DbNodeList":{ + "type":"list", + "member":{"shape":"DbNodeSummary"} + }, + "DbNodeMaintenanceType":{ + "type":"string", + "enum":["VMDB_REBOOT_MIGRATION"] + }, + "DbNodeResourceStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "FAILED", + "PROVISIONING", + "TERMINATED", + "TERMINATING", + "UPDATING", + "STOPPING", + "STOPPED", + "STARTING" + ] + }, + "DbNodeSummary":{ + "type":"structure", + "members":{ + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node.

        " + }, + "dbNodeArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the DB node.

        " + }, + "status":{ + "shape":"DbNodeResourceStatus", + "documentation":"

        The current status of the DB node.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the DB node.

        " + }, + "additionalDetails":{ + "shape":"String", + "documentation":"

        Additional information about the planned maintenance.

        " + }, + "backupIpId":{ + "shape":"String", + "documentation":"

        The Oracle Cloud ID (OCID) of the backup IP address that's associated with the DB node.

        " + }, + "backupVnic2Id":{ + "shape":"String", + "documentation":"

        The OCID of the second backup virtual network interface card (VNIC) for the DB node.

        " + }, + "backupVnicId":{ + "shape":"String", + "documentation":"

        The OCID of the backup VNIC for the DB node.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled on the DB node.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated on the DB node.

        " + }, + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the database server that's associated with the DB node.

        " + }, + "dbSystemId":{ + "shape":"String", + "documentation":"

        The OCID of the DB system.

        " + }, + "faultDomain":{ + "shape":"String", + "documentation":"

        The name of the fault domain where the DB node is located.

        " + }, + "hostIpId":{ + "shape":"String", + "documentation":"

        The OCID of the host IP address that's associated with the DB node.

        " + }, + "hostname":{ + "shape":"String", + "documentation":"

        The host name for the DB node.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the DB node.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the DB node.

        " + }, + "maintenanceType":{ + "shape":"DbNodeMaintenanceType", + "documentation":"

        The type of maintenance for the DB node.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated on the DB node.

        " + }, + "softwareStorageSizeInGB":{ + "shape":"Integer", + "documentation":"

        The size of the block storage volume, in gigabytes (GB), that's allocated for the DB system. This attribute applies only for virtual machine DB systems.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the DB node was created.

        " + }, + "timeMaintenanceWindowEnd":{ + "shape":"String", + "documentation":"

        The end date and time of the maintenance window.

        " + }, + "timeMaintenanceWindowStart":{ + "shape":"String", + "documentation":"

        The start date and time of the maintenance window.

        " + }, + "totalCpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores reserved on the DB node.

        " + }, + "vnic2Id":{ + "shape":"String", + "documentation":"

        The OCID of the second VNIC.

        " + }, + "vnicId":{ + "shape":"String", + "documentation":"

        The OCID of the VNIC.

        " + } + }, + "documentation":"

        Information about a DB node.

        " + }, + "DbServer":{ + "type":"structure", + "members":{ + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier for the database server.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the database server.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the database server.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled on the database server.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The allocated local node storage in GBs on the database server.

        " + }, + "dbServerPatchingDetails":{ + "shape":"DbServerPatchingDetails", + "documentation":"

        The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the database server.

        " + }, + "exadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The ID of the Exadata infrastructure the database server belongs to.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the database server.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the database server.

        " + }, + "maxCpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores available.

        " + }, + "maxDbNodeStorageInGBs":{ + "shape":"Integer", + "documentation":"

        The total local node storage available in GBs.

        " + }, + "maxMemoryInGBs":{ + "shape":"Integer", + "documentation":"

        The total memory available in GBs.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The allocated memory in GBs on the database server.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The shape of the database server. The shape determines the amount of CPU, storage, and memory resources available.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the database server was created.

        " + }, + "vmClusterIds":{ + "shape":"StringList", + "documentation":"

        The OCIDs of the VM clusters that are associated with the database server.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The compute model of the database server (ECPU or OCPU).

        " + }, + "autonomousVmClusterIds":{ + "shape":"StringList", + "documentation":"

        The list of identifiers for the Autonomous VM clusters associated with this database server.

        " + }, + "autonomousVirtualMachineIds":{ + "shape":"StringList", + "documentation":"

        The list of unique identifiers for the Autonomous VMs associated with this database server.

        " + } + }, + "documentation":"

        Information about a database server.

        " + }, + "DbServerList":{ + "type":"list", + "member":{"shape":"DbServerSummary"} + }, + "DbServerPatchingDetails":{ + "type":"structure", + "members":{ + "estimatedPatchDuration":{ + "shape":"Integer", + "documentation":"

        Estimated time, in minutes, to patch one database server.

        " + }, + "patchingStatus":{ + "shape":"DbServerPatchingStatus", + "documentation":"

        The status of the patching operation. Possible values are SCHEDULED, MAINTENANCE_IN_PROGRESS, FAILED, and COMPLETE.

        " + }, + "timePatchingEnded":{ + "shape":"String", + "documentation":"

        The time when the patching operation ended.

        " + }, + "timePatchingStarted":{ + "shape":"String", + "documentation":"

        The time when the patching operation started.

        " + } + }, + "documentation":"

        The scheduling details for the quarterly maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "DbServerPatchingStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "FAILED", + "MAINTENANCE_IN_PROGRESS", + "SCHEDULED" + ] + }, + "DbServerSummary":{ + "type":"structure", + "members":{ + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the database server.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the database server.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the database server.

        " + }, + "cpuCoreCount":{ + "shape":"Integer", + "documentation":"

        The number of CPU cores enabled on the database server.

        " + }, + "dbNodeStorageSizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of local node storage, in gigabytes (GB), that's allocated on the database server.

        " + }, + "dbServerPatchingDetails":{"shape":"DbServerPatchingDetails"}, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the database server. The name doesn't need to be unique.

        " + }, + "exadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The ID of the Exadata infrastructure that hosts the database server.

        " + }, + "ocid":{ + "shape":"String", + "documentation":"

        The OCID of the database server.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor for the database server.

        " + }, + "maxCpuCount":{ + "shape":"Integer", + "documentation":"

        The total number of CPU cores available on the database server.

        " + }, + "maxDbNodeStorageInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of local node storage, in gigabytes (GB), that's available on the database server.

        " + }, + "maxMemoryInGBs":{ + "shape":"Integer", + "documentation":"

        The total amount of memory, in gigabytes (GB), that's available on the database server.

        " + }, + "memorySizeInGBs":{ + "shape":"Integer", + "documentation":"

        The amount of memory, in gigabytes (GB), that's allocated on the database server.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The hardware system model of the Exadata infrastructure that the database server is hosted on. The shape determines the amount of CPU, storage, and memory resources available.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the database server was created.

        " + }, + "vmClusterIds":{ + "shape":"StringList", + "documentation":"

        The IDs of the VM clusters that are associated with the database server.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + }, + "autonomousVmClusterIds":{ + "shape":"StringList", + "documentation":"

        A list of identifiers for the Autonomous VM clusters.

        " + }, + "autonomousVirtualMachineIds":{ + "shape":"StringList", + "documentation":"

        A list of unique identifiers for the Autonomous VMs.

        " + } + }, + "documentation":"

        Information about a database server.

        " + }, + "DbSystemShapeList":{ + "type":"list", + "member":{"shape":"DbSystemShapeSummary"} + }, + "DbSystemShapeSummary":{ + "type":"structure", + "members":{ + "availableCoreCount":{ + "shape":"Integer", + "documentation":"

        The maximum number of CPU cores that can be enabled for the shape.

        " + }, + "availableCoreCountPerNode":{ + "shape":"Integer", + "documentation":"

        The maximum number of CPU cores per DB node that can be enabled for the shape.

        " + }, + "availableDataStorageInTBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of data storage, in terabytes (TB), that can be enabled for the shape.

        " + }, + "availableDataStoragePerServerInTBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of data storage, in terabytes (TB), that's available per storage server for the shape.

        " + }, + "availableDbNodePerNodeInGBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of DB node storage, in gigabytes (GB), that's available per DB node for the shape.

        " + }, + "availableDbNodeStorageInGBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of DB node storage, in gigabytes (GB), that can be enabled for the shape.

        " + }, + "availableMemoryInGBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of memory, in gigabytes (GB), that can be enabled for the shape.

        " + }, + "availableMemoryPerNodeInGBs":{ + "shape":"Integer", + "documentation":"

        The maximum amount of memory, in gigabytes (GB), that's available per DB node for the shape.

        " + }, + "coreCountIncrement":{ + "shape":"Integer", + "documentation":"

        The discrete number by which the CPU core count for the shape can be increased or decreased.

        " + }, + "maxStorageCount":{ + "shape":"Integer", + "documentation":"

        The maximum number of Exadata storage servers that are available for the shape.

        " + }, + "maximumNodeCount":{ + "shape":"Integer", + "documentation":"

        The maximum number of compute servers that are available for the shape.

        " + }, + "minCoreCountPerNode":{ + "shape":"Integer", + "documentation":"

        The minimum number of CPU cores that can be enabled per node for the shape.

        " + }, + "minDataStorageInTBs":{ + "shape":"Integer", + "documentation":"

        The minimum amount of data storage, in terabytes (TB), that must be allocated for the shape.

        " + }, + "minDbNodeStoragePerNodeInGBs":{ + "shape":"Integer", + "documentation":"

        The minimum amount of DB node storage, in gigabytes (GB), that must be allocated per DB node for the shape.

        " + }, + "minMemoryPerNodeInGBs":{ + "shape":"Integer", + "documentation":"

        The minimum amount of memory, in gigabytes (GB), that must be allocated per DB node for the shape.

        " + }, + "minStorageCount":{ + "shape":"Integer", + "documentation":"

        The minimum number of Exadata storage servers that are available for the shape.

        " + }, + "minimumCoreCount":{ + "shape":"Integer", + "documentation":"

        The minimum number of CPU cores that can be enabled for the shape.

        " + }, + "minimumNodeCount":{ + "shape":"Integer", + "documentation":"

        The minimum number of compute servers that are available for the shape.

        " + }, + "runtimeMinimumCoreCount":{ + "shape":"Integer", + "documentation":"

        The runtime minimum number of CPU cores that can be enabled for the shape.

        " + }, + "shapeFamily":{ + "shape":"String", + "documentation":"

        The family of the shape.

        " + }, + "shapeType":{ + "shape":"ShapeType", + "documentation":"

        The shape type. This property is determined by the CPU hardware.

        " + }, + "name":{ + "shape":"String", + "documentation":"

        The name of the shape.

        " + }, + "computeModel":{ + "shape":"ComputeModel", + "documentation":"

        The OCI compute model used when you create or clone an instance: ECPU or OCPU. An ECPU is an abstracted measure of compute resources. ECPUs are based on the number of cores elastically allocated from a pool of compute and storage servers. An OCPU is a legacy physical measure of compute resources. OCPUs are based on the physical core of a processor with hyper-threading enabled.

        " + }, + "areServerTypesSupported":{ + "shape":"Boolean", + "documentation":"

        Indicates whether the hardware system model supports configurable database and server storage types.

        " + } + }, + "documentation":"

        Information about a hardware system model (shape) that's available for an Exadata infrastructure. The shape determines resources, such as CPU cores, memory, and storage, to allocate to the Exadata infrastructure.

        " + }, + "DeleteCloudAutonomousVmClusterInput":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster to delete.

        " + } + } + }, + "DeleteCloudAutonomousVmClusterOutput":{ + "type":"structure", + "members":{} + }, + "DeleteCloudExadataInfrastructureInput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure to delete.

        " + } + } + }, + "DeleteCloudExadataInfrastructureOutput":{ + "type":"structure", + "members":{} + }, + "DeleteCloudVmClusterInput":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster to delete.

        " + } + } + }, + "DeleteCloudVmClusterOutput":{ + "type":"structure", + "members":{} + }, + "DeleteOdbNetworkInput":{ + "type":"structure", + "required":[ + "odbNetworkId", + "deleteAssociatedResources" + ], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network to delete.

        " + }, + "deleteAssociatedResources":{ + "shape":"Boolean", + "documentation":"

        Specifies whether to delete associated OCI networking resources along with the ODB network.

        " + } + } + }, + "DeleteOdbNetworkOutput":{ + "type":"structure", + "members":{} + }, + "DeleteOdbPeeringConnectionInput":{ + "type":"structure", + "required":["odbPeeringConnectionId"], + "members":{ + "odbPeeringConnectionId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB peering connection to delete.

        " + } + } + }, + "DeleteOdbPeeringConnectionOutput":{ + "type":"structure", + "members":{} + }, + "DiskRedundancy":{ + "type":"string", + "enum":[ + "HIGH", + "NORMAL" + ] + }, + "Double":{ + "type":"double", + "box":true + }, + "ExadataIormConfig":{ + "type":"structure", + "members":{ + "dbPlans":{ + "shape":"DbIormConfigList", + "documentation":"

        An array of IORM settings for all the databases in the Exadata DB system.

        " + }, + "lifecycleDetails":{ + "shape":"String", + "documentation":"

        Additional information about the current lifecycleState.

        " + }, + "lifecycleState":{ + "shape":"IormLifecycleState", + "documentation":"

        The current state of IORM configuration for the Exadata DB system.

        " + }, + "objective":{ + "shape":"Objective", + "documentation":"

        The current value for the IORM objective. The default is AUTO.

        " + } + }, + "documentation":"

        The IORM settings of the Exadata DB system.

        " + }, + "Float":{ + "type":"float", + "box":true + }, + "GetCloudAutonomousVmClusterInput":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster to retrieve information about.

        " + } + } + }, + "GetCloudAutonomousVmClusterOutput":{ + "type":"structure", + "members":{ + "cloudAutonomousVmCluster":{ + "shape":"CloudAutonomousVmCluster", + "documentation":"

        The details of the requested Autonomous VM cluster.

        " + } + } + }, + "GetCloudExadataInfrastructureInput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure.

        " + } + } + }, + "GetCloudExadataInfrastructureOutput":{ + "type":"structure", + "members":{ + "cloudExadataInfrastructure":{ + "shape":"CloudExadataInfrastructure", + "documentation":"

        The Exadata infrastructure.

        " + } + } + }, + "GetCloudExadataInfrastructureUnallocatedResourcesInput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Cloud Exadata infrastructure for which to retrieve unallocated resources.

        " + }, + "dbServers":{ + "shape":"StringList", + "documentation":"

        The database servers to include in the unallocated resources query.

        " + } + } + }, + "GetCloudExadataInfrastructureUnallocatedResourcesOutput":{ + "type":"structure", + "members":{ + "cloudExadataInfrastructureUnallocatedResources":{ + "shape":"CloudExadataInfrastructureUnallocatedResources", + "documentation":"

        Details about the unallocated resources in the specified Cloud Exadata infrastructure.

        " + } + } + }, + "GetCloudVmClusterInput":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster.

        " + } + } + }, + "GetCloudVmClusterOutput":{ + "type":"structure", + "members":{ + "cloudVmCluster":{ + "shape":"CloudVmCluster", + "documentation":"

        The VM cluster.

        " + } + } + }, + "GetDbNodeInput":{ + "type":"structure", + "required":[ + "cloudVmClusterId", + "dbNodeId" + ], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster that contains the DB node.

        " + }, + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node to retrieve information about.

        " + } + } + }, + "GetDbNodeOutput":{ + "type":"structure", + "members":{ + "dbNode":{"shape":"DbNode"} + } + }, + "GetDbServerInput":{ + "type":"structure", + "required":[ + "cloudExadataInfrastructureId", + "dbServerId" + ], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Oracle Exadata infrastructure that contains the database server.

        " + }, + "dbServerId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the database server to retrieve information about.

        " + } + } + }, + "GetDbServerOutput":{ + "type":"structure", + "members":{ + "dbServer":{ + "shape":"DbServer", + "documentation":"

        The details of the requested database server.

        " + } + } + }, + "GetOciOnboardingStatusInput":{ + "type":"structure", + "members":{} + }, + "GetOciOnboardingStatusOutput":{ + "type":"structure", + "members":{ + "status":{"shape":"OciOnboardingStatus"}, + "existingTenancyActivationLink":{ + "shape":"String", + "documentation":"

        The existing OCI tenancy activation link for your Amazon Web Services account.

        " + }, + "newTenancyActivationLink":{ + "shape":"String", + "documentation":"

        A new OCI tenancy activation link for your Amazon Web Services account.

        " + } + } + }, + "GetOdbNetworkInput":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network.

        " + } + } + }, + "GetOdbNetworkOutput":{ + "type":"structure", + "members":{ + "odbNetwork":{ + "shape":"OdbNetwork", + "documentation":"

        The ODB network.

        " + } + } + }, + "GetOdbPeeringConnectionInput":{ + "type":"structure", + "required":["odbPeeringConnectionId"], + "members":{ + "odbPeeringConnectionId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB peering connection to retrieve information about.

        " + } + } + }, + "GetOdbPeeringConnectionOutput":{ + "type":"structure", + "members":{ + "odbPeeringConnection":{"shape":"OdbPeeringConnection"} + } + }, + "GiVersionList":{ + "type":"list", + "member":{"shape":"GiVersionSummary"} + }, + "GiVersionSummary":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"

        The GI software version.

        " + } + }, + "documentation":"

        Information about a specific version of Oracle Grid Infrastructure (GI) software that can be installed on a VM cluster.

        " + }, + "HoursOfDay":{ + "type":"list", + "member":{"shape":"Integer"} + }, + "InitializeServiceInput":{ + "type":"structure", + "members":{} + }, + "InitializeServiceOutput":{ + "type":"structure", + "members":{} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

        The number of seconds to wait before retrying the request after an internal server error.

        " + } + }, + "documentation":"

        Occurs when there is an internal failure in the Oracle Database@Amazon Web Services service. Wait and try again.

        ", + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "IormLifecycleState":{ + "type":"string", + "enum":[ + "BOOTSTRAPPING", + "DISABLED", + "ENABLED", + "FAILED", + "UPDATING" + ] + }, + "LicenseModel":{ + "type":"string", + "enum":[ + "BRING_YOUR_OWN_LICENSE", + "LICENSE_INCLUDED" + ] + }, + "ListAutonomousVirtualMachinesInput":{ + "type":"structure", + "required":["cloudAutonomousVmClusterId"], + "members":{ + "maxResults":{ + "shape":"ListAutonomousVirtualMachinesInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return per page.

        " + }, + "nextToken":{ + "shape":"ListAutonomousVirtualMachinesInputNextTokenString", + "documentation":"

        The pagination token to continue listing from.

        " + }, + "cloudAutonomousVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the Autonomous VM cluster whose virtual machines you're listing.

        " + } + } + }, + "ListAutonomousVirtualMachinesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListAutonomousVirtualMachinesInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListAutonomousVirtualMachinesOutput":{ + "type":"structure", + "required":["autonomousVirtualMachines"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The pagination token from which to continue listing.

        " + }, + "autonomousVirtualMachines":{ + "shape":"AutonomousVirtualMachineList", + "documentation":"

        The list of Autonomous VMs in the specified Autonomous VM cluster.

        " + } + } + }, + "ListCloudAutonomousVmClustersInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListCloudAutonomousVmClustersInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return per page.

        " + }, + "nextToken":{ + "shape":"ListCloudAutonomousVmClustersInputNextTokenString", + "documentation":"

        The pagination token to continue listing from.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Cloud Exadata Infrastructure that hosts the Autonomous VM clusters to be listed.

        " + } + } + }, + "ListCloudAutonomousVmClustersInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListCloudAutonomousVmClustersInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListCloudAutonomousVmClustersOutput":{ + "type":"structure", + "required":["cloudAutonomousVmClusters"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The pagination token to continue listing from.

        " + }, + "cloudAutonomousVmClusters":{ + "shape":"CloudAutonomousVmClusterList", + "documentation":"

        The list of Autonomous VM clusters in the specified Cloud Exadata Infrastructure.

        " + } + } + }, + "ListCloudExadataInfrastructuresInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListCloudExadataInfrastructuresInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListCloudExadataInfrastructuresInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + } + } + }, + "ListCloudExadataInfrastructuresInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListCloudExadataInfrastructuresInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListCloudExadataInfrastructuresOutput":{ + "type":"structure", + "required":["cloudExadataInfrastructures"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "cloudExadataInfrastructures":{ + "shape":"CloudExadataInfrastructureList", + "documentation":"

        The list of Exadata infrastructures along with their properties.

        " + } + } + }, + "ListCloudVmClustersInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListCloudVmClustersInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListCloudVmClustersInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Oracle Exadata infrastructure.

        " + } + } + }, + "ListCloudVmClustersInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListCloudVmClustersInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListCloudVmClustersOutput":{ + "type":"structure", + "required":["cloudVmClusters"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "cloudVmClusters":{ + "shape":"CloudVmClusterList", + "documentation":"

        The list of VM clusters along with their properties.

        " + } + } + }, + "ListDbNodesInput":{ + "type":"structure", + "required":["cloudVmClusterId"], + "members":{ + "maxResults":{ + "shape":"ListDbNodesInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListDbNodesInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + }, + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster.

        " + } + } + }, + "ListDbNodesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListDbNodesInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListDbNodesOutput":{ + "type":"structure", + "required":["dbNodes"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "dbNodes":{ + "shape":"DbNodeList", + "documentation":"

        The list of DB nodes along with their properties.

        " + } + } + }, + "ListDbServersInput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Oracle Exadata infrastructure.

        " + }, + "maxResults":{ + "shape":"ListDbServersInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListDbServersInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + } + } + }, + "ListDbServersInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListDbServersInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListDbServersOutput":{ + "type":"structure", + "required":["dbServers"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "dbServers":{ + "shape":"DbServerList", + "documentation":"

        The list of database servers along with their properties.

        " + } + } + }, + "ListDbSystemShapesInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListDbSystemShapesInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListDbSystemShapesInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + }, + "availabilityZone":{ + "shape":"ListDbSystemShapesInputAvailabilityZoneString", + "documentation":"

        The logical name of the AZ, for example, us-east-1a. This name varies depending on the account.

        " + }, + "availabilityZoneId":{ + "shape":"ListDbSystemShapesInputAvailabilityZoneIdString", + "documentation":"

        The physical ID of the AZ, for example, use1-az4. This ID persists across accounts.

        " + } + } + }, + "ListDbSystemShapesInputAvailabilityZoneIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "ListDbSystemShapesInputAvailabilityZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "ListDbSystemShapesInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListDbSystemShapesInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListDbSystemShapesOutput":{ + "type":"structure", + "required":["dbSystemShapes"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "dbSystemShapes":{ + "shape":"DbSystemShapeList", + "documentation":"

        The list of shapes and their properties.

        " + } + } + }, + "ListGiVersionsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListGiVersionsInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListGiVersionsInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + }, + "shape":{ + "shape":"ListGiVersionsInputShapeString", + "documentation":"

        The shape to return GI versions for. For a list of valid shapes, use the ListDbSystemShapes operation.

        " + } + } + }, + "ListGiVersionsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListGiVersionsInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListGiVersionsInputShapeString":{ + "type":"string", + "max":255, + "min":1 + }, + "ListGiVersionsOutput":{ + "type":"structure", + "required":["giVersions"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "giVersions":{ + "shape":"GiVersionList", + "documentation":"

        The list of GI versions and their properties.

        " + } + } + }, + "ListOdbNetworksInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListOdbNetworksInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListOdbNetworksInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + } + } + }, + "ListOdbNetworksInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListOdbNetworksInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListOdbNetworksOutput":{ + "type":"structure", + "required":["odbNetworks"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "odbNetworks":{ + "shape":"OdbNetworkList", + "documentation":"

        The list of ODB networks.

        " + } + } + }, + "ListOdbPeeringConnectionsInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListOdbPeeringConnectionsInputMaxResultsInteger", + "documentation":"

        The maximum number of ODB peering connections to return in the response.

        Default: 20

        Constraints:

        • Must be between 1 and 100.

        " + }, + "nextToken":{ + "shape":"ListOdbPeeringConnectionsInputNextTokenString", + "documentation":"

        The pagination token for the next page of ODB peering connections.

        " + }, + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The identifier of the ODB network to list peering connections for.

        If not specified, lists all ODB peering connections in the account.

        " + } + } + }, + "ListOdbPeeringConnectionsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListOdbPeeringConnectionsInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListOdbPeeringConnectionsOutput":{ + "type":"structure", + "required":["odbPeeringConnections"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The pagination token for the next page of ODB peering connections.

        " + }, + "odbPeeringConnections":{ + "shape":"OdbPeeringConnectionList", + "documentation":"

        The list of ODB peering connections.

        " + } + } + }, + "ListSystemVersionsInput":{ + "type":"structure", + "required":[ + "giVersion", + "shape" + ], + "members":{ + "maxResults":{ + "shape":"ListSystemVersionsInputMaxResultsInteger", + "documentation":"

        The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output.

        Default: 10

        " + }, + "nextToken":{ + "shape":"ListSystemVersionsInputNextTokenString", + "documentation":"

        The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

        " + }, + "giVersion":{ + "shape":"ListSystemVersionsInputGiVersionString", + "documentation":"

        The software version of the Exadata Grid Infrastructure (GI).

        " + }, + "shape":{ + "shape":"ListSystemVersionsInputShapeString", + "documentation":"

        The Exadata hardware system model.

        " + } + } + }, + "ListSystemVersionsInputGiVersionString":{ + "type":"string", + "max":30, + "min":1 + }, + "ListSystemVersionsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListSystemVersionsInputNextTokenString":{ + "type":"string", + "max":8192, + "min":1 + }, + "ListSystemVersionsInputShapeString":{ + "type":"string", + "max":255, + "min":1 + }, + "ListSystemVersionsOutput":{ + "type":"structure", + "required":["systemVersions"], + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

        The token to include in another request to get the next page of items. This value is null when there are no more items to return.

        " + }, + "systemVersions":{ + "shape":"SystemVersionList", + "documentation":"

        The list of system versions.

        " + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the resource to list tags for.

        " + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"ResponseTagMap", + "documentation":"

        The list of tags applied to the resource.

        " + } + } + }, + "MaintenanceWindow":{ + "type":"structure", + "members":{ + "customActionTimeoutInMins":{ + "shape":"MaintenanceWindowCustomActionTimeoutInMinsInteger", + "documentation":"

        The custom action timeout in minutes for the maintenance window.

        " + }, + "daysOfWeek":{ + "shape":"DaysOfWeek", + "documentation":"

        The days of the week when maintenance can be performed.

        " + }, + "hoursOfDay":{ + "shape":"HoursOfDay", + "documentation":"

        The hours of the day when maintenance can be performed.

        " + }, + "isCustomActionTimeoutEnabled":{ + "shape":"Boolean", + "documentation":"

        Indicates whether custom action timeout is enabled for the maintenance window.

        " + }, + "leadTimeInWeeks":{ + "shape":"MaintenanceWindowLeadTimeInWeeksInteger", + "documentation":"

        The lead time in weeks before the maintenance window.

        " + }, + "months":{ + "shape":"Months", + "documentation":"

        The months when maintenance can be performed.

        " + }, + "patchingMode":{ + "shape":"PatchingModeType", + "documentation":"

        The patching mode for the maintenance window.

        " + }, + "preference":{ + "shape":"PreferenceType", + "documentation":"

        The preference for the maintenance window scheduling.

        " + }, + "skipRu":{ + "shape":"Boolean", + "documentation":"

        Indicates whether to skip release updates during maintenance.

        " + }, + "weeksOfMonth":{ + "shape":"WeeksOfMonth", + "documentation":"

        The weeks of the month when maintenance can be performed.

        " + } + }, + "documentation":"

        The scheduling details for the maintenance window. Patching and system updates take place during the maintenance window.

        " + }, + "MaintenanceWindowCustomActionTimeoutInMinsInteger":{ + "type":"integer", + "box":true, + "max":120, + "min":15 + }, + "MaintenanceWindowLeadTimeInWeeksInteger":{ + "type":"integer", + "box":true, + "max":4, + "min":1 + }, + "ManagedResourceStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "ENABLING", + "DISABLED", + "DISABLING" + ] + }, + "ManagedS3BackupAccess":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ManagedResourceStatus", + "documentation":"

        The status of the managed Amazon S3 backup access.

        Valid Values: enabled | disabled

        " + }, + "ipv4Addresses":{ + "shape":"StringList", + "documentation":"

        The IPv4 addresses for the managed Amazon S3 backup access.

        " + } + }, + "documentation":"

        The configuration for managed Amazon S3 backup access from the ODB network.

        " + }, + "ManagedServices":{ + "type":"structure", + "members":{ + "serviceNetworkArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the service network.

        " + }, + "resourceGatewayArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the resource gateway.

        " + }, + "managedServicesIpv4Cidrs":{ + "shape":"StringList", + "documentation":"

        The IPv4 CIDR blocks for the managed services.

        " + }, + "serviceNetworkEndpoint":{ + "shape":"ServiceNetworkEndpoint", + "documentation":"

        The service network endpoint configuration.

        " + }, + "managedS3BackupAccess":{ + "shape":"ManagedS3BackupAccess", + "documentation":"

        The managed Amazon S3 backup access configuration.

        " + }, + "zeroEtlAccess":{ + "shape":"ZeroEtlAccess", + "documentation":"

        The Zero-ETL access configuration.

        " + }, + "s3Access":{ + "shape":"S3Access", + "documentation":"

        The Amazon S3 access configuration.

        " + } + }, + "documentation":"

        The managed services configuration for the ODB network.

        " + }, + "Month":{ + "type":"structure", + "members":{ + "name":{ + "shape":"MonthName", + "documentation":"

        The name of the month.

        " + } + }, + "documentation":"

        An enumeration of months used for scheduling maintenance windows.

        " + }, + "MonthName":{ + "type":"string", + "enum":[ + "JANUARY", + "FEBRUARY", + "MARCH", + "APRIL", + "MAY", + "JUNE", + "JULY", + "AUGUST", + "SEPTEMBER", + "OCTOBER", + "NOVEMBER", + "DECEMBER" + ] + }, + "Months":{ + "type":"list", + "member":{"shape":"Month"} + }, + "Objective":{ + "type":"string", + "enum":[ + "AUTO", + "BALANCED", + "BASIC", + "HIGH_THROUGHPUT", + "LOW_LATENCY" + ] + }, + "OciDnsForwardingConfig":{ + "type":"structure", + "members":{ + "domainName":{ + "shape":"OciDnsForwardingConfigDomainNameString", + "documentation":"

        The domain name to which the DNS resolver forwards requests.

        " + }, + "ociDnsListenerIp":{ + "shape":"String", + "documentation":"

        OCI DNS listener IP for custom DNS setup.

        " + } + }, + "documentation":"

        DNS configuration to forward DNS resolver endpoints to your OCI Private Zone.

        " + }, + "OciDnsForwardingConfigDomainNameString":{ + "type":"string", + "max":255, + "min":1 + }, + "OciDnsForwardingConfigList":{ + "type":"list", + "member":{"shape":"OciDnsForwardingConfig"} + }, + "OciOnboardingStatus":{ + "type":"string", + "documentation":"

        ", + "enum":[ + "NOT_STARTED", + "PENDING_LINK_GENERATION", + "PENDING_CUSTOMER_ACTION", + "PENDING_INITIALIZATION", + "ACTIVATING", + "ACTIVE_IN_HOME_REGION", + "ACTIVE", + "ACTIVE_LIMITED", + "FAILED", + "PUBLIC_OFFER_UNSUPPORTED", + "SUSPENDED", + "CANCELED" + ] + }, + "OdbNetwork":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the ODB network.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the ODB network.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the ODB network.

        " + }, + "odbNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB network.

        " + }, + "availabilityZone":{ + "shape":"OdbNetworkAvailabilityZoneString", + "documentation":"

        The Amazon Web Services Availability Zone (AZ) where the ODB network is located.

        " + }, + "availabilityZoneId":{ + "shape":"OdbNetworkAvailabilityZoneIdString", + "documentation":"

        The AZ ID of the AZ where the ODB network is located.

        " + }, + "clientSubnetCidr":{ + "shape":"OdbNetworkClientSubnetCidrString", + "documentation":"

        The CIDR range of the client subnet in the ODB network.

        " + }, + "backupSubnetCidr":{ + "shape":"OdbNetworkBackupSubnetCidrString", + "documentation":"

        The CIDR range of the backup subnet in the ODB network.

        " + }, + "customDomainName":{ + "shape":"OdbNetworkCustomDomainNameString", + "documentation":"

        The domain name for the resources in the ODB network.

        " + }, + "defaultDnsPrefix":{ + "shape":"OdbNetworkDefaultDnsPrefixString", + "documentation":"

        The DNS prefix to the default DNS domain name. The default DNS domain name is oraclevcn.com.

        " + }, + "peeredCidrs":{ + "shape":"StringList", + "documentation":"

        The list of CIDR ranges from the peered VPC that are allowed access to the ODB network.

        " + }, + "ociNetworkAnchorId":{ + "shape":"OdbNetworkOciNetworkAnchorIdString", + "documentation":"

        The unique identifier of the OCI network anchor for the ODB network.

        " + }, + "ociNetworkAnchorUrl":{ + "shape":"String", + "documentation":"

        The URL of the OCI network anchor for the ODB network.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor that's associated with the ODB network.

        " + }, + "ociVcnId":{ + "shape":"OdbNetworkOciVcnIdString", + "documentation":"

        The Oracle Cloud ID (OCID) for the Virtual Cloud Network (VCN) that's associated with the ODB network.

        " + }, + "ociVcnUrl":{ + "shape":"String", + "documentation":"

        The URL for the VCN that's associated with the ODB network.

        " + }, + "ociDnsForwardingConfigs":{ + "shape":"OciDnsForwardingConfigList", + "documentation":"

        The DNS resolver endpoint in OCI for forwarding DNS queries for the ociPrivateZone domain.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the ODB network was created.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the ODB network, expressed as a percentage.

        " + }, + "managedServices":{ + "shape":"ManagedServices", + "documentation":"

        The managed services configuration for the ODB network.

        " + } + }, + "documentation":"

        Information about an ODB network.

        " + }, + "OdbNetworkAvailabilityZoneIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkAvailabilityZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkBackupSubnetCidrString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkClientSubnetCidrString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkCustomDomainNameString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkDefaultDnsPrefixString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkList":{ + "type":"list", + "member":{"shape":"OdbNetworkSummary"} + }, + "OdbNetworkOciNetworkAnchorIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkOciVcnIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummary":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the ODB network.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the ODB network.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the current status of the ODB network.

        " + }, + "odbNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB network.

        " + }, + "availabilityZone":{ + "shape":"OdbNetworkSummaryAvailabilityZoneString", + "documentation":"

        The Amazon Web Services Availability Zone (AZ) where the ODB network is located.

        " + }, + "availabilityZoneId":{ + "shape":"OdbNetworkSummaryAvailabilityZoneIdString", + "documentation":"

        The AZ ID of the AZ where the ODB network is located.

        " + }, + "clientSubnetCidr":{ + "shape":"OdbNetworkSummaryClientSubnetCidrString", + "documentation":"

        The CIDR range of the client subnet in the ODB network.

        " + }, + "backupSubnetCidr":{ + "shape":"OdbNetworkSummaryBackupSubnetCidrString", + "documentation":"

        The CIDR range of the backup subnet in the ODB network.

        " + }, + "customDomainName":{ + "shape":"OdbNetworkSummaryCustomDomainNameString", + "documentation":"

        The domain name for the resources in the ODB network.

        " + }, + "defaultDnsPrefix":{ + "shape":"OdbNetworkSummaryDefaultDnsPrefixString", + "documentation":"

        The DNS prefix to the default DNS domain name. The default DNS domain name is oraclevcn.com.

        " + }, + "peeredCidrs":{ + "shape":"StringList", + "documentation":"

        The list of CIDR ranges from the peered VPC that are allowed access to the ODB network.

        " + }, + "ociNetworkAnchorId":{ + "shape":"OdbNetworkSummaryOciNetworkAnchorIdString", + "documentation":"

        The unique identifier of the OCI network anchor for the ODB network.

        " + }, + "ociNetworkAnchorUrl":{ + "shape":"String", + "documentation":"

        The URL of the OCI network anchor for the ODB network.

        " + }, + "ociResourceAnchorName":{ + "shape":"String", + "documentation":"

        The name of the OCI resource anchor associated with the ODB network.

        " + }, + "ociVcnId":{ + "shape":"OdbNetworkSummaryOciVcnIdString", + "documentation":"

        The Oracle Cloud ID (OCID) for the Virtual Cloud Network (VCN) associated with the ODB network.

        " + }, + "ociVcnUrl":{ + "shape":"String", + "documentation":"

        The URL for the VCN that's associated with the ODB network.

        " + }, + "ociDnsForwardingConfigs":{ + "shape":"OciDnsForwardingConfigList", + "documentation":"

        The DNS resolver endpoint in OCI for forwarding DNS queries for the ociPrivateZone domain.

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The date and time when the ODB network was created.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The amount of progress made on the current operation on the ODB network, expressed as a percentage.

        " + }, + "managedServices":{ + "shape":"ManagedServices", + "documentation":"

        The managed services configuration for the ODB network.

        " + } + }, + "documentation":"

        Information about an ODB network.

        " + }, + "OdbNetworkSummaryAvailabilityZoneIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryAvailabilityZoneString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryBackupSubnetCidrString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryClientSubnetCidrString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryCustomDomainNameString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryDefaultDnsPrefixString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryOciNetworkAnchorIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbNetworkSummaryOciVcnIdString":{ + "type":"string", + "max":255, + "min":1 + }, + "OdbPeeringConnection":{ + "type":"structure", + "required":["odbPeeringConnectionId"], + "members":{ + "odbPeeringConnectionId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB peering connection. A sample ID is odbpcx-abcdefgh12345678.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The display name of the ODB peering connection.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The status of the ODB peering connection.

        Valid Values: provisioning | active | terminating | terminated | failed

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the ODB peering connection.

        " + }, + "odbPeeringConnectionArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB peering connection.

        Example: arn:aws:odb:us-east-1:123456789012:odb-peering-connection/odbpcx-abcdefgh12345678

        " + }, + "odbNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB network that initiated the peering connection.

        " + }, + "peerNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the peer network.

        " + }, + "odbPeeringConnectionType":{ + "shape":"String", + "documentation":"

        The type of the ODB peering connection.

        Valid Values: ODB-VPC | ODB-ODB

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The timestamp when the ODB peering connection was created.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The percentage progress of the ODB peering connection creation or deletion.

        " + } + }, + "documentation":"

        A peering connection between an ODB network and either another ODB network or a customer-owned VPC.

        " + }, + "OdbPeeringConnectionList":{ + "type":"list", + "member":{"shape":"OdbPeeringConnectionSummary"} + }, + "OdbPeeringConnectionSummary":{ + "type":"structure", + "required":["odbPeeringConnectionId"], + "members":{ + "odbPeeringConnectionId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB peering connection. A sample ID is odbpcx-abcdefgh12345678.

        " + }, + "displayName":{ + "shape":"String", + "documentation":"

        The display name of the ODB peering connection.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The status of the ODB peering connection.

        Valid Values: provisioning | active | terminating | terminated | failed

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        The reason for the current status of the ODB peering connection.

        " + }, + "odbPeeringConnectionArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB peering connection.

        " + }, + "odbNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the ODB network that initiated the peering connection.

        " + }, + "peerNetworkArn":{ + "shape":"String", + "documentation":"

        The Amazon Resource Name (ARN) of the peer network.

        " + }, + "odbPeeringConnectionType":{ + "shape":"String", + "documentation":"

        The type of the ODB peering connection.

        Valid Values: ODB-VPC | ODB-ODB

        " + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

        The timestamp when the ODB peering connection was created.

        " + }, + "percentProgress":{ + "shape":"Float", + "documentation":"

        The percentage progress of the ODB peering connection creation or deletion.

        " + } + }, + "documentation":"

        A summary of an ODB peering connection.

        " + }, + "PatchingModeType":{ + "type":"string", + "enum":[ + "ROLLING", + "NONROLLING" + ] + }, + "PolicyDocument":{ + "type":"string", + "max":20480, + "min":3 + }, + "PreferenceType":{ + "type":"string", + "enum":[ + "NO_PREFERENCE", + "CUSTOM_PREFERENCE" + ] + }, + "RebootDbNodeInput":{ + "type":"structure", + "required":[ + "cloudVmClusterId", + "dbNodeId" + ], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster that contains the DB node to reboot.

        " + }, + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node to reboot.

        " + } + } + }, + "RebootDbNodeOutput":{ + "type":"structure", + "required":["dbNodeId"], + "members":{ + "dbNodeId":{ + "shape":"String", + "documentation":"

        The unique identifier of the DB node that was rebooted.

        " + }, + "status":{ + "shape":"DbNodeResourceStatus", + "documentation":"

        The current status of the DB node after the reboot operation.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the DB node after the reboot operation.

        " + } + } + }, + "RequestTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "ResourceArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:(?:aws|aws-cn|aws-us-gov|aws-iso-{0,1}[a-z]{0,1}):[a-z0-9-]+:[a-z0-9-]*:[0-9]+:[a-z0-9-]+/[a-z0-9-_]{6,64}" + }, + "ResourceDisplayName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[a-zA-Z_](?!.*--)[a-zA-Z0-9_-]*" + }, + "ResourceId":{ + "type":"string", + "max":64, + "min":6, + "pattern":"[a-zA-Z0-9_~.-]+" + }, + "ResourceIdOrArn":{ + "type":"string", + "max":2048, + "min":6, + "pattern":"(arn:(?:aws|aws-cn|aws-us-gov|aws-iso-{0,1}[a-z]{0,1}):[a-z0-9-]+:[a-z0-9-]*:[0-9]+:[a-z0-9-]+/[a-zA-Z0-9_~.-]{6,64}|[a-zA-Z0-9_~.-]{6,64})" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

        The identifier of the resource that was not found.

        " + }, + "resourceType":{ + "shape":"String", + "documentation":"

        The type of resource that was not found.

        " + } + }, + "documentation":"

        The operation tried to access a resource that doesn't exist. Make sure you provided the correct resource and try again.

        ", + "exception":true + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "AVAILABLE", + "FAILED", + "PROVISIONING", + "TERMINATED", + "TERMINATING", + "UPDATING", + "MAINTENANCE_IN_PROGRESS" + ] + }, + "ResponseTagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":0 + }, + "S3Access":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ManagedResourceStatus", + "documentation":"

        The status of the Amazon S3 access.

        Valid Values: enabled | disabled

        " + }, + "ipv4Addresses":{ + "shape":"StringList", + "documentation":"

        The IPv4 addresses for the Amazon S3 access.

        " + }, + "domainName":{ + "shape":"String", + "documentation":"

        The domain name for the Amazon S3 access.

        " + }, + "s3PolicyDocument":{ + "shape":"String", + "documentation":"

        The endpoint policy for the Amazon S3 access.

        " + } + }, + "documentation":"

        The configuration for Amazon S3 access from the ODB network.

        " + }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, + "SensitiveStringList":{ + "type":"list", + "member":{"shape":"SensitiveString"} + }, + "ServiceNetworkEndpoint":{ + "type":"structure", + "members":{ + "vpcEndpointId":{ + "shape":"String", + "documentation":"

        The identifier of the VPC endpoint.

        " + }, + "vpcEndpointType":{ + "shape":"VpcEndpointType", + "documentation":"

        The type of the VPC endpoint.

        Valid Values: Interface | Gateway

        " + } + }, + "documentation":"

        The configuration for a service network endpoint.

        " + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "quotaCode" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

        The identifier of the resource that exceeded the service quota.

        " + }, + "resourceType":{ + "shape":"String", + "documentation":"

        The type of resource that exceeded the service quota.

        " + }, + "quotaCode":{ + "shape":"String", + "documentation":"

        The unique identifier of the service quota that was exceeded.

        " + } + }, + "documentation":"

        You have exceeded the service quota.

        ", + "exception":true + }, + "ShapeType":{ + "type":"string", + "enum":[ + "AMD", + "INTEL", + "INTEL_FLEX_X9", + "AMPERE_FLEX_A1" + ] + }, + "StartDbNodeInput":{ + "type":"structure", + "required":[ + "cloudVmClusterId", + "dbNodeId" + ], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster that contains the DB node to start.

        " + }, + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node to start.

        " + } + } + }, + "StartDbNodeOutput":{ + "type":"structure", + "required":["dbNodeId"], + "members":{ + "dbNodeId":{ + "shape":"String", + "documentation":"

        The unique identifier of the DB node that was started.

        " + }, + "status":{ + "shape":"DbNodeResourceStatus", + "documentation":"

        The current status of the DB node after the start operation.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the DB node after the start operation.

        " + } + } + }, + "StopDbNodeInput":{ + "type":"structure", + "required":[ + "cloudVmClusterId", + "dbNodeId" + ], + "members":{ + "cloudVmClusterId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the VM cluster that contains the DB node to stop.

        " + }, + "dbNodeId":{ + "shape":"ResourceId", + "documentation":"

        The unique identifier of the DB node to stop.

        " + } + } + }, + "StopDbNodeOutput":{ + "type":"structure", + "required":["dbNodeId"], + "members":{ + "dbNodeId":{ + "shape":"String", + "documentation":"

        The unique identifier of the DB node that was stopped.

        " + }, + "status":{ + "shape":"DbNodeResourceStatus", + "documentation":"

        The current status of the DB node after the stop operation.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the DB node after the stop operation.

        " + } + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"}, + "max":1024, + "min":1 + }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "SystemVersionList":{ + "type":"list", + "member":{"shape":"SystemVersionSummary"} + }, + "SystemVersionSummary":{ + "type":"structure", + "members":{ + "giVersion":{ + "shape":"String", + "documentation":"

        The version of GI software.

        " + }, + "shape":{ + "shape":"String", + "documentation":"

        The Exadata hardware model.

        " + }, + "systemVersions":{ + "shape":"StringList", + "documentation":"

        The Exadata system versions that are compatible with the specified Exadata shape and GI version.

        " + } + }, + "documentation":"

        Information about the compatible system versions that can be used with a specific Exadata shape and Grid Infrastructure (GI) version.

        " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the resource to apply tags to.

        " + }, + "tags":{ + "shape":"RequestTagMap", + "documentation":"

        The list of tags to apply to the resource.

        " + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{} + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

        The number of seconds to wait before retrying the request after being throttled.

        " + } + }, + "documentation":"

        The request was denied due to request throttling.

        ", + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

        The Amazon Resource Name (ARN) of the resource to remove tags from.

        " + }, + "tagKeys":{ + "shape":"TagKeys", + "documentation":"

        The names (keys) of the tags to remove from the resource.

        " + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{} + }, + "UpdateCloudExadataInfrastructureInput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "cloudExadataInfrastructureId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the Exadata infrastructure to update.

        " + }, + "maintenanceWindow":{"shape":"MaintenanceWindow"} + } + }, + "UpdateCloudExadataInfrastructureOutput":{ + "type":"structure", + "required":["cloudExadataInfrastructureId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the updated Exadata infrastructure.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the Exadata infrastructure after the update operation.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the Exadata infrastructure after the update operation.

        " + }, + "cloudExadataInfrastructureId":{ + "shape":"String", + "documentation":"

        The unique identifier of the updated Exadata infrastructure.

        " + } + } + }, + "UpdateOdbNetworkInput":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "odbNetworkId":{ + "shape":"ResourceIdOrArn", + "documentation":"

        The unique identifier of the ODB network to update.

        " + }, + "displayName":{ + "shape":"ResourceDisplayName", + "documentation":"

        The new user-friendly name of the ODB network.

        " + }, + "peeredCidrsToBeAdded":{ + "shape":"StringList", + "documentation":"

        The list of CIDR ranges from the peered VPC that allow access to the ODB network.

        " + }, + "peeredCidrsToBeRemoved":{ + "shape":"StringList", + "documentation":"

        The list of CIDR ranges from the peered VPC to remove from the ODB network.

        " + }, + "s3Access":{ + "shape":"Access", + "documentation":"

        Specifies the updated configuration for Amazon S3 access from the ODB network.

        " + }, + "zeroEtlAccess":{ + "shape":"Access", + "documentation":"

        Specifies the updated configuration for Zero-ETL access from the ODB network.

        " + }, + "s3PolicyDocument":{ + "shape":"PolicyDocument", + "documentation":"

        Specifies the updated endpoint policy for Amazon S3 access from the ODB network.

        " + } + } + }, + "UpdateOdbNetworkOutput":{ + "type":"structure", + "required":["odbNetworkId"], + "members":{ + "displayName":{ + "shape":"String", + "documentation":"

        The user-friendly name of the ODB network.

        " + }, + "status":{ + "shape":"ResourceStatus", + "documentation":"

        The current status of the ODB network.

        " + }, + "statusReason":{ + "shape":"String", + "documentation":"

        Additional information about the status of the ODB network.

        " + }, + "odbNetworkId":{ + "shape":"String", + "documentation":"

        The unique identifier of the ODB network.

        " + } + } + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

        The reason why the validation failed.

        " + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

        A list of fields that failed validation.

        " + } + }, + "documentation":"

        The request has failed validation because it is missing required fields or has invalid inputs.

        ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

        The field name for which validation failed.

        " + }, + "message":{ + "shape":"String", + "documentation":"

        The description of the error.

        " + } + }, + "documentation":"

        The input failed to meet the constraints specified by the service in a specified field. Make sure you provided the correct input and try again.

        " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "other" + ] + }, + "VpcEndpointType":{ + "type":"string", + "enum":["SERVICENETWORK"] + }, + "WeeksOfMonth":{ + "type":"list", + "member":{"shape":"Integer"} + }, + "ZeroEtlAccess":{ + "type":"structure", + "members":{ + "status":{ + "shape":"ManagedResourceStatus", + "documentation":"

        The status of the Zero-ETL access.

        Valid Values: enabled | disabled

        " + }, + "cidr":{ + "shape":"String", + "documentation":"

        The CIDR block for the Zero-ETL access.

        " + } + }, + "documentation":"

        The configuration for Zero-ETL access from the ODB network.

        " + } + }, + "documentation":"

        Oracle Database@Amazon Web Services is an offering that enables you to access Oracle Exadata infrastructure managed by Oracle Cloud Infrastructure (OCI) inside Amazon Web Services data centers. You can migrate your Oracle Exadata workloads, establish low-latency connectivity with applications running on Amazon Web Services, and integrate with Amazon Web Services services. For example, you can run application servers in a virtual private cloud (VPC) and access an Oracle Exadata system running in Oracle Database@Amazon Web Services. You can get started with Oracle Database@Amazon Web Services by using the familiar Amazon Web Services Management Console, APIs, or CLI.

        This interface reference for Oracle Database@Amazon Web Services contains documentation for a programming or command line interface that you can use to manage Oracle Database@Amazon Web Services. Oracle Database@Amazon Web Services is asynchronous, which means that some interfaces might require techniques such as polling or callback functions to determine when a command has been applied. The reference structure is as follows.

        In this preview release documentation, the links in the \"See Also\" sections do not work.

        Oracle Database@Amazon Web Services API Reference

        • For the alphabetical list of API actions, see .

        • For the alphabetical list of data types, see .

        • For a list of common parameters, see CommonParameters.

        • For descriptions of the error codes, see CommonErrors.

        " +} diff --git a/services/odb/src/main/resources/codegen-resources/waiters-2.json b/services/odb/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/odb/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/omics/pom.xml b/services/omics/pom.xml index 608a9b99b881..f3429edf9ca1 100644 --- a/services/omics/pom.xml +++ b/services/omics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT omics AWS Java SDK :: Services :: Omics diff --git a/services/omics/src/main/resources/codegen-resources/customization.config b/services/omics/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/omics/src/main/resources/codegen-resources/customization.config +++ b/services/omics/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/opensearch/pom.xml b/services/opensearch/pom.xml index 82e6a06cdeba..bd33174b70b9 100644 --- a/services/opensearch/pom.xml +++ b/services/opensearch/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT opensearch AWS Java SDK :: Services :: Open Search diff --git a/services/opensearch/src/main/resources/codegen-resources/customization.config b/services/opensearch/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/opensearch/src/main/resources/codegen-resources/customization.config +++ b/services/opensearch/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + 
"enableGenerateCompiledEndpointRules": true } diff --git a/services/opensearchserverless/pom.xml b/services/opensearchserverless/pom.xml index e2cf6827747c..61c070fe7205 100644 --- a/services/opensearchserverless/pom.xml +++ b/services/opensearchserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT opensearchserverless AWS Java SDK :: Services :: Open Search Serverless diff --git a/services/opensearchserverless/src/main/resources/codegen-resources/customization.config b/services/opensearchserverless/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/opensearchserverless/src/main/resources/codegen-resources/customization.config +++ b/services/opensearchserverless/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/opsworks/pom.xml b/services/opsworks/pom.xml index eed100d1933d..e52346465894 100644 --- a/services/opsworks/pom.xml +++ b/services/opsworks/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT opsworks AWS Java SDK :: Services :: AWS OpsWorks diff --git a/services/opsworks/src/main/resources/codegen-resources/customization.config b/services/opsworks/src/main/resources/codegen-resources/customization.config index af24625f1822..9b21e4466998 100644 --- a/services/opsworks/src/main/resources/codegen-resources/customization.config +++ b/services/opsworks/src/main/resources/codegen-resources/customization.config @@ -21,6 +21,5 @@ "describeRaidArrays", "describeVolumes" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/opsworkscm/pom.xml b/services/opsworkscm/pom.xml index c2a4e452a475..7252d87245ff 100644 --- 
a/services/opsworkscm/pom.xml +++ b/services/opsworkscm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT opsworkscm AWS Java SDK :: Services :: AWS OpsWorks for Chef Automate diff --git a/services/opsworkscm/src/main/resources/codegen-resources/customization.config b/services/opsworkscm/src/main/resources/codegen-resources/customization.config index 421a6292883f..d291200bbd79 100644 --- a/services/opsworkscm/src/main/resources/codegen-resources/customization.config +++ b/services/opsworkscm/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "describeBackups", "describeServers" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/organizations/pom.xml b/services/organizations/pom.xml index 1ad22b470174..596f45f0591c 100644 --- a/services/organizations/pom.xml +++ b/services/organizations/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT organizations AWS Java SDK :: Services :: AWS Organizations diff --git a/services/organizations/src/main/resources/codegen-resources/customization.config b/services/organizations/src/main/resources/codegen-resources/customization.config index 4f9c7b4f9733..cf0472a9f85f 100644 --- a/services/organizations/src/main/resources/codegen-resources/customization.config +++ b/services/organizations/src/main/resources/codegen-resources/customization.config @@ -12,6 +12,5 @@ "listRoots", "listAWSServiceAccessForOrganization" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/organizations/src/main/resources/codegen-resources/service-2.json b/services/organizations/src/main/resources/codegen-resources/service-2.json index f0013079980d..8ce86e56c315 100644 --- 
a/services/organizations/src/main/resources/codegen-resources/service-2.json +++ b/services/organizations/src/main/resources/codegen-resources/service-2.json @@ -60,7 +60,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

        Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "CancelHandshake":{ "name":"CancelHandshake", @@ -208,7 +208,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

        For more information about policies and their use, see Managing Organizations policies.

        If the request includes tags, then the requester must have the organizations:TagResource permission.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Creates a policy of a specified type that you can attach to a root, an organizational unit (OU), or an individual Amazon Web Services account.

        For more information about policies and their use, see Managing Organizations policies.

        If the request includes tags, then the requester must have the organizations:TagResource permission.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DeclineHandshake":{ "name":"DeclineHandshake", @@ -284,7 +284,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Deletes the specified policy from your organization. Before you perform this operation, you must first detach the policy from all organizational units (OUs), roots, and accounts.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -302,7 +302,7 @@ {"shape":"AWSOrganizationsNotInUseException"}, {"shape":"ResourcePolicyNotFoundException"} ], - "documentation":"

        Deletes the resource policy from your organization.

        You can only call this operation from the organization's management account.

        " + "documentation":"

        Deletes the resource policy from your organization.

        This operation can be called only from the organization's management account.

        " }, "DeregisterDelegatedAdministrator":{ "name":"DeregisterDelegatedAdministrator", @@ -341,7 +341,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Retrieves Organizations-related information about the specified account.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves Organizations-related information about the specified account.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DescribeCreateAccountStatus":{ "name":"DescribeCreateAccountStatus", @@ -360,7 +360,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Retrieves the current status of an asynchronous request to create an account.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves the current status of an asynchronous request to create an account.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DescribeEffectivePolicy":{ "name":"DescribeEffectivePolicy", @@ -433,7 +433,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Retrieves information about an organizational unit (OU).

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves information about an organizational unit (OU).

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DescribePolicy":{ "name":"DescribePolicy", @@ -452,7 +452,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Retrieves information about a policy.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves information about a policy.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DescribeResourcePolicy":{ "name":"DescribeResourcePolicy", @@ -470,7 +470,7 @@ {"shape":"ResourcePolicyNotFoundException"}, {"shape":"ConstraintViolationException"} ], - "documentation":"

        Retrieves information about a resource policy.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves information about a resource policy.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DetachPolicy":{ "name":"DetachPolicy", @@ -493,7 +493,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

        Detaches a policy from a target root, organizational unit (OU), or account.

        If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

        Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Detaches a policy from a target root, organizational unit (OU), or account.

        If the policy being detached is a service control policy (SCP), the changes to permissions for Identity and Access Management (IAM) users and roles in affected accounts are immediate.

        Every root, OU, and account must have at least one SCP attached. If you want to replace the default FullAWSAccess policy with an SCP that limits the permissions that can be delegated, you must attach the replacement SCP before you can remove the default SCP. This is the authorization strategy of an \"allow list\". If you instead attach a second SCP and leave the FullAWSAccess SCP still attached, and specify \"Effect\": \"Deny\" in the second SCP to override the \"Effect\": \"Allow\" in the FullAWSAccess policy (or any other attached SCP), you're using the authorization strategy of a \"deny list\".

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "DisableAWSServiceAccess":{ "name":"DisableAWSServiceAccess", @@ -535,7 +535,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

        Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

        This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        To view the status of available policy types in the organization, use DescribeOrganization.

        " + "documentation":"

        Disables an organizational policy type in a root. A policy of a certain type can be attached to entities in a root only if that type is enabled in the root. After you perform this operation, you no longer can attach policies of the specified type to that root or to any organizational unit (OU) or account in that root. You can undo this by using the EnablePolicyType operation.

        This is an asynchronous request that Amazon Web Services performs in the background. If you disable a policy type for a root, it still appears enabled for the organization if all features are enabled for the organization. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        To view the status of available policy types in the organization, use DescribeOrganization.

        " }, "EnableAWSServiceAccess":{ "name":"EnableAWSServiceAccess", @@ -554,7 +554,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations.

        We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

        For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

        You can only call this operation from the organization's management account and only if the organization has enabled all features.

        " + "documentation":"

        Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations.

        We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

        For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

        This operation can be called only from the organization's management account.

        " }, "EnableAllFeatures":{ "name":"EnableAllFeatures", @@ -598,7 +598,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

        Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

        This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

        " + "documentation":"

        Enables a policy type in a root. After you enable a policy type in a root, you can attach policies of that type to the root, any organizational unit (OU), or account in that root. You can undo this by using the DisablePolicyType operation.

        This is an asynchronous request that Amazon Web Services performs in the background. Amazon Web Services recommends that you first use ListRoots to see the status of policy types for a specified root, and then use this operation.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization.

        " }, "InviteAccountToOrganization":{ "name":"InviteAccountToOrganization", @@ -621,7 +621,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

        • You can invite Amazon Web Services accounts only from the same seller as the management account. For example, if your organization's management account was created by Amazon Internet Services Pvt. Ltd (AISPL), an Amazon Web Services seller in India, you can invite only other AISPL accounts to your organization. You can't combine accounts from AISPL and Amazon Web Services or from any other Amazon Web Services seller. For more information, see Consolidated billing in India.

        • If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

        If the request includes tags, then the requester must have the organizations:TagResource permission.

        This operation can be called only from the organization's management account.

        " + "documentation":"

        Sends an invitation to another account to join your organization as a member account. Organizations sends email on your behalf to the email address that is associated with the other account's owner. The invitation is implemented as a Handshake whose details are in the response.

        If you receive an exception that indicates that you exceeded your account limits for the organization or that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists after an hour, contact Amazon Web Services Support.

        If the request includes tags, then the requester must have the organizations:TagResource permission.

        This operation can be called only from the organization's management account.

        " }, "LeaveOrganization":{ "name":"LeaveOrganization", @@ -640,7 +640,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

        This operation can be called only from a member account in the organization.

        • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

        • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

          • Choose a support plan

          • Provide and verify the required contact information

          • Provide a current payment method

          Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

        • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.

        • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

        • A newly created account has a waiting period before it can be removed from its organization. You must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.

        • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization.

        " + "documentation":"

        Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

        This operation can be called only from a member account in the organization.

        • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.

        • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.

          • Choose a support plan

          • Provide and verify the required contact information

          • Provide a current payment method

          Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.

        • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.

        • A newly created account has a waiting period before it can be removed from its organization. You must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.

        • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization.

        " }, "ListAWSServiceAccessForOrganization":{ "name":"ListAWSServiceAccessForOrganization", @@ -659,7 +659,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

        For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Returns a list of the Amazon Web Services services that you enabled to integrate with your organization. After a service on this list creates the resources that it requires for the integration, it can perform operations on your organization and its accounts.

        For more information about integrating other services with Organizations, including the list of services that currently work with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListAccounts":{ "name":"ListAccounts", @@ -676,7 +676,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists all the accounts in the organization. To request only the accounts in a specified root or organizational unit (OU), use the ListAccountsForParent operation instead.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists all the accounts in the organization. To request only the accounts in a specified root or organizational unit (OU), use the ListAccountsForParent operation instead.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListAccountsForParent":{ "name":"ListAccountsForParent", @@ -694,7 +694,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists the accounts in an organization that are contained by the specified target root or organizational unit (OU). If you specify the root, you get a list of all the accounts that aren't in any OU. If you specify an OU, you get a list of all the accounts in only that OU and not in any child OUs. To get a list of all accounts in the organization, use the ListAccounts operation.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the accounts in an organization that are contained by the specified target root or organizational unit (OU). If you specify the root, you get a list of all the accounts that aren't in any OU. If you specify an OU, you get a list of all the accounts in only that OU and not in any child OUs. To get a list of all accounts in the organization, use the ListAccounts operation.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListChildren":{ "name":"ListChildren", @@ -712,7 +712,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists all of the organizational units (OUs) or accounts that are contained in the specified parent OU or root. This operation, along with ListParents enables you to traverse the tree structure that makes up this root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists all of the organizational units (OUs) or accounts that are contained in the specified parent OU or root. This operation, along with ListParents, enables you to traverse the tree structure that makes up this root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListCreateAccountStatus":{ "name":"ListCreateAccountStatus", @@ -730,7 +730,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Lists the account creation requests that match the specified status that is currently being tracked for the organization.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the account creation requests that match the specified status that is currently being tracked for the organization.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListDelegatedAdministrators":{ "name":"ListDelegatedAdministrators", @@ -749,7 +749,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Lists the Amazon Web Services accounts that are designated as delegated administrators in this organization.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the Amazon Web Services accounts that are designated as delegated administrators in this organization.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListDelegatedServicesForAccount":{ "name":"ListDelegatedServicesForAccount", @@ -770,7 +770,7 @@ {"shape":"ServiceException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        List the Amazon Web Services services for which the specified account is a delegated administrator.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the Amazon Web Services services for which the specified account is a delegated administrator.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListHandshakesForAccount":{ "name":"ListHandshakesForAccount", @@ -805,7 +805,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists the handshakes that are associated with the organization that the requesting user is part of. The ListHandshakesForOrganization operation returns a list of handshake structures. Each structure contains details and status about a handshake.

        Handshakes that are ACCEPTED, DECLINED, CANCELED, or EXPIRED appear in the results of this API for only 30 days after changing to that state. After that, they're deleted and no longer accessible.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the handshakes that are associated with the organization that the requesting user is part of. The ListHandshakesForOrganization operation returns a list of handshake structures. Each structure contains details and status about a handshake.

        Handshakes that are ACCEPTED, DECLINED, CANCELED, or EXPIRED appear in the results of this API for only 30 days after changing to that state. After that, they're deleted and no longer accessible.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListOrganizationalUnitsForParent":{ "name":"ListOrganizationalUnitsForParent", @@ -823,7 +823,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists the organizational units (OUs) in a parent organizational unit or root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the organizational units (OUs) in a parent organizational unit or root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListParents":{ "name":"ListParents", @@ -841,7 +841,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists the root or organizational units (OUs) that serve as the immediate parent of the specified child OU or account. This operation, along with ListChildren enables you to traverse the tree structure that makes up this root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        In the current release, a child can have only a single parent.

        " + "documentation":"

        Lists the root or organizational units (OUs) that serve as the immediate parent of the specified child OU or account. This operation, along with ListChildren enables you to traverse the tree structure that makes up this root.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        In the current release, a child can have only a single parent.

        " }, "ListPolicies":{ "name":"ListPolicies", @@ -859,7 +859,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Retrieves the list of all policies in an organization of a specified type.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Retrieves the list of all policies in an organization of a specified type.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListPoliciesForTarget":{ "name":"ListPoliciesForTarget", @@ -878,7 +878,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Lists the policies that are directly attached to the specified target root, organizational unit (OU), or account. You must specify the policy type that you want included in the returned list.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists the policies that are directly attached to the specified target root, organizational unit (OU), or account. You must specify the policy type that you want included in the returned list.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListRoots":{ "name":"ListRoots", @@ -895,7 +895,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists the roots that are defined in the current organization.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        Policy types can be enabled and disabled in roots. This is distinct from whether they're available in the organization. When you enable all features, you make policy types available for use in that organization. Individual policy types can then be enabled and disabled in a root. To see the availability of a policy type in an organization, use DescribeOrganization.

        " + "documentation":"

        Lists the roots that are defined in the current organization.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        Policy types can be enabled and disabled in roots. This is distinct from whether they're available in the organization. When you enable all features, you make policy types available for use in that organization. Individual policy types can then be enabled and disabled in a root. To see the availability of a policy type in an organization, use DescribeOrganization.

        " }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -913,7 +913,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Lists tags that are attached to the specified resource.

        You can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists tags that are attached to the specified resource.

        You can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "ListTargetsForPolicy":{ "name":"ListTargetsForPolicy", @@ -932,7 +932,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

        Lists all the roots, organizational units (OUs), and accounts that the specified policy is attached to.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Lists all the roots, organizational units (OUs), and accounts that the specified policy is attached to.

        Always check the NextToken response parameter for a null value when calling a List* operation. These operations can occasionally return an empty set of results even when there are more results available. The NextToken response parameter value is null only when there are no more results to display.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "MoveAccount":{ "name":"MoveAccount", @@ -973,7 +973,7 @@ {"shape":"ConstraintViolationException"}, {"shape":"AWSOrganizationsNotInUseException"} ], - "documentation":"

        Creates or updates a resource policy.

        You can only call this operation from the organization's management account.

        " + "documentation":"

        Creates or updates a resource policy.

        This operation can be called only from the organization's management account.

        " }, "RegisterDelegatedAdministrator":{ "name":"RegisterDelegatedAdministrator", @@ -1033,7 +1033,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Adds one or more tags to the specified resource.

        Currently, you can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Adds one or more tags to the specified resource.

        Currently, you can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "UntagResource":{ "name":"UntagResource", @@ -1052,7 +1052,7 @@ {"shape":"ServiceException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

        Removes any tags with the specified keys from the specified resource.

        You can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Removes any tags with the specified keys from the specified resource.

        You can attach tags to the following resources in Organizations.

        • Amazon Web Services account

        • Organization root

        • Organizational unit (OU)

        • Policy (any type)

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " }, "UpdateOrganizationalUnit":{ "name":"UpdateOrganizationalUnit", @@ -1096,7 +1096,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

        Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator for an Amazon Web Services service.

        " + "documentation":"

        Updates an existing policy with a new name, description, or content. If you don't supply any parameter, that value remains unchanged. You can't change a policy's type.

        This operation can be called only from the organization's management account or by a member account that is a delegated administrator.

        " } }, "shapes":{ @@ -1381,7 +1381,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

        Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

        Some of the reasons in the following list might not be applicable to this specific API or operation.

        • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

        • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

        • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

        • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

        • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

          Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

          Deleted and closed accounts still count toward your limit.

          If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

        • ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED: Your organization has more than 5000 accounts, and you can only use the standard migration process for organizations with less than 5000 accounts. Use the assisted migration process to enable all features mode, or create a support case for assistance if you are unable to use assisted migration.

        • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

        • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

        • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​

        • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

        • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

        • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​

        • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

        • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

        • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

        • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

        • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

        • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace.

        • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

        • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

        • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

        • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

        • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

        • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

        • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

        • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

        • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

        • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

        • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

        • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

        • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

        • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

        • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.

        ", + "documentation":"

        Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

        Some of the reasons in the following list might not be applicable to this specific API or operation.

        • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.

        • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.

        • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.

        • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.

        • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit.

          Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts.

          Deleted and closed accounts still count toward your limit.

          If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.

        • ALL_FEATURES_MIGRATION_ORGANIZATION_SIZE_LIMIT_EXCEEDED: Your organization has more than 5000 accounts, and you can only use the standard migration process for organizations with less than 5000 accounts. Use the assisted migration process to enable all features mode, or create a support case for assistance if you are unable to use assisted migration.

        • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.

        • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.

        • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​

        • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.

        • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.

        • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​

        • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.

        • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.

        • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.

        • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

        • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.

        • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace.

        • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.

        • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.

        • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.

        • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.

        • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.

        • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.

        • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.

        • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.

        • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.

        • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.

        • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.

        • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.

        • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.

        • POLICY_TYPE_ENABLED_FOR_THIS_SERVICE: You attempted to disable service access before you disabled the policy type (for example, SECURITYHUB_POLICY). To complete this operation, you must first disable the policy type.

        • SERVICE_ACCESS_NOT_ENABLED:

          • You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.

          • You attempted to enable a policy type before you enabled service access. Call the EnableAWSServiceAccess API first.

        • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.

        • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.

        ", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1414,6 +1414,7 @@ "CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR", "CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG", "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE", + "POLICY_TYPE_ENABLED_FOR_THIS_SERVICE", "MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE", "CANNOT_CLOSE_MANAGEMENT_ACCOUNT", "CLOSE_ACCOUNT_QUOTA_EXCEEDED", @@ -1663,7 +1664,7 @@ }, "Type":{ "shape":"PolicyType", - "documentation":"

        The type of policy to create. You can specify one of the following values:

        " + "documentation":"

        The type of policy to create. You can specify one of the following values:

        " }, "Tags":{ "shape":"Tags", @@ -1840,7 +1841,7 @@ "members":{ "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

        The type of policy that you want information about. You can specify one of the following values:

        " + "documentation":"

        The type of policy that you want information about. You can specify one of the following values:

        " }, "TargetId":{ "shape":"PolicyTargetId", @@ -1980,7 +1981,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

        The policy type that you want to disable in this root. You can specify one of the following values:

        " + "documentation":"

        The policy type that you want to disable in this root. You can specify one of the following values:

        " } } }, @@ -2070,7 +2071,8 @@ "BACKUP_POLICY", "AISERVICES_OPT_OUT_POLICY", "CHATBOT_POLICY", - "DECLARATIVE_POLICY_EC2" + "DECLARATIVE_POLICY_EC2", + "SECURITYHUB_POLICY" ] }, "Email":{ @@ -2092,8 +2094,7 @@ }, "EnableAllFeaturesRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "EnableAllFeaturesResponse":{ "type":"structure", @@ -2117,7 +2118,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

        The policy type that you want to enable. You can specify one of the following values:

        " + "documentation":"

        The policy type that you want to enable. You can specify one of the following values:

        " } } }, @@ -2218,7 +2219,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"HandshakeConstraintViolationExceptionReason"} }, - "documentation":"

        The requested operation would violate the constraint identified in the reason code.

        Some of the reasons in the following list might not be applicable to this specific API or operation:

        • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

          If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

        • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

        • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

        • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

        • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

        • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

        • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization. For example, accounts with India addresses must be associated with the AISPL marketplace. All accounts in an organization must be from the same marketplace.

        • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

        • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

        ", + "documentation":"

        The requested operation would violate the constraint identified in the reason code.

        Some of the reasons in the following list might not be applicable to this specific API or operation:

        • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. Note that deleted and closed accounts still count toward your limit.

          If you get this exception immediately after creating the organization, wait one hour and try again. If after an hour it continues to fail with this error, contact Amazon Web Services Support.

        • ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited account is already a member of an organization.

        • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.

        • INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to join an organization while it's in the process of enabling all features. You can resume inviting accounts after you finalize the process when all accounts have agreed to the change.

        • ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because the organization has already enabled all features.

        • ORGANIZATION_IS_ALREADY_PENDING_ALL_FEATURES_MIGRATION: The handshake request is invalid because the organization has already started the process to enable all features.

        • ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the account is from a different marketplace than the accounts in the organization.

        • ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You attempted to change the membership of an account too quickly after its previous change.

        • PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an account that doesn't have a payment instrument, such as a credit card, associated with it.

        ", "exception":true }, "HandshakeConstraintViolationExceptionReason":{ @@ -2793,7 +2794,7 @@ }, "Filter":{ "shape":"PolicyType", - "documentation":"

        The type of policy that you want to include in the returned list. You must specify one of the following values:

        " + "documentation":"

        The type of policy that you want to include in the returned list. You must specify one of the following values:

        " }, "NextToken":{ "shape":"NextToken", @@ -2824,7 +2825,7 @@ "members":{ "Filter":{ "shape":"PolicyType", - "documentation":"

        Specifies the type of policy that you want to include in the response. You must specify one of the following values:

        " + "documentation":"

        Specifies the type of policy that you want to include in the response. You must specify one of the following values:

        " }, "NextToken":{ "shape":"NextToken", @@ -3275,7 +3276,8 @@ "BACKUP_POLICY", "AISERVICES_OPT_OUT_POLICY", "CHATBOT_POLICY", - "DECLARATIVE_POLICY_EC2" + "DECLARATIVE_POLICY_EC2", + "SECURITYHUB_POLICY" ] }, "PolicyTypeAlreadyEnabledException":{ diff --git a/services/osis/pom.xml b/services/osis/pom.xml index 0418d1638548..0ce05fb40680 100644 --- a/services/osis/pom.xml +++ b/services/osis/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT osis AWS Java SDK :: Services :: OSIS diff --git a/services/osis/src/main/resources/codegen-resources/customization.config b/services/osis/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/osis/src/main/resources/codegen-resources/customization.config +++ b/services/osis/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/outposts/pom.xml b/services/outposts/pom.xml index 951c1e9b5c1e..9ca0531b1230 100644 --- a/services/outposts/pom.xml +++ b/services/outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT outposts AWS Java SDK :: Services :: Outposts diff --git a/services/outposts/src/main/resources/codegen-resources/customization.config b/services/outposts/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/outposts/src/main/resources/codegen-resources/customization.config +++ b/services/outposts/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/outposts/src/main/resources/codegen-resources/service-2.json 
b/services/outposts/src/main/resources/codegen-resources/service-2.json index 9631f5d0c9f2..f96140532f08 100644 --- a/services/outposts/src/main/resources/codegen-resources/service-2.json +++ b/services/outposts/src/main/resources/codegen-resources/service-2.json @@ -589,6 +589,8 @@ "Address":{ "type":"structure", "required":[ + "ContactName", + "ContactPhoneNumber", "AddressLine1", "City", "StateOrRegion", @@ -868,8 +870,7 @@ }, "CancelCapacityTaskOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CancelOrderInput":{ "type":"structure", @@ -885,8 +886,7 @@ }, "CancelOrderOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "CapacityTaskFailure":{ "type":"structure", @@ -1278,8 +1278,7 @@ }, "DeleteOutpostOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSiteInput":{ "type":"structure", @@ -1295,8 +1294,7 @@ }, "DeleteSiteOutput":{ "type":"structure", - "members":{ - } + "members":{} }, "DeviceSerialNumber":{ "type":"string", @@ -2937,8 +2935,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2994,8 +2991,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateOutpostInput":{ "type":"structure", diff --git a/services/panorama/pom.xml b/services/panorama/pom.xml index 22b8e8dfb1e0..0db4a60aeb6d 100644 --- a/services/panorama/pom.xml +++ b/services/panorama/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT panorama AWS Java SDK :: Services :: Panorama diff --git a/services/panorama/src/main/resources/codegen-resources/customization.config b/services/panorama/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/panorama/src/main/resources/codegen-resources/customization.config +++ b/services/panorama/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - 
"enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/partnercentralselling/pom.xml b/services/partnercentralselling/pom.xml index 7c61dff1564d..eac6509c5236 100644 --- a/services/partnercentralselling/pom.xml +++ b/services/partnercentralselling/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT partnercentralselling AWS Java SDK :: Services :: Partner Central Selling diff --git a/services/partnercentralselling/src/main/resources/codegen-resources/customization.config b/services/partnercentralselling/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/partnercentralselling/src/main/resources/codegen-resources/customization.config +++ b/services/partnercentralselling/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/partnercentralselling/src/main/resources/codegen-resources/service-2.json b/services/partnercentralselling/src/main/resources/codegen-resources/service-2.json index 02f9b793bdef..587392971a15 100644 --- a/services/partnercentralselling/src/main/resources/codegen-resources/service-2.json +++ b/services/partnercentralselling/src/main/resources/codegen-resources/service-2.json @@ -23,8 +23,8 @@ }, "input":{"shape":"AcceptEngagementInvitationRequest"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -73,8 +73,8 @@ "input":{"shape":"CreateEngagementRequest"}, "output":{"shape":"CreateEngagementResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, 
{"shape":"ServiceQuotaExceededException"}, @@ -92,8 +92,8 @@ "input":{"shape":"CreateEngagementInvitationRequest"}, "output":{"shape":"CreateEngagementInvitationResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, @@ -111,8 +111,8 @@ "input":{"shape":"CreateOpportunityRequest"}, "output":{"shape":"CreateOpportunityResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -130,8 +130,8 @@ "input":{"shape":"CreateResourceSnapshotRequest"}, "output":{"shape":"CreateResourceSnapshotResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, @@ -149,8 +149,8 @@ "input":{"shape":"CreateResourceSnapshotJobRequest"}, "output":{"shape":"CreateResourceSnapshotJobResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, @@ -167,8 +167,8 @@ }, "input":{"shape":"DeleteResourceSnapshotJobRequest"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -511,8 +511,8 @@ }, "input":{"shape":"RejectEngagementInvitationRequest"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, 
{"shape":"ValidationException"}, @@ -529,8 +529,8 @@ "input":{"shape":"StartEngagementByAcceptingInvitationTaskRequest"}, "output":{"shape":"StartEngagementByAcceptingInvitationTaskResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -548,8 +548,8 @@ "input":{"shape":"StartEngagementFromOpportunityTaskRequest"}, "output":{"shape":"StartEngagementFromOpportunityTaskResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -615,8 +615,8 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -634,8 +634,8 @@ "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -653,8 +653,8 @@ "input":{"shape":"UpdateOpportunityRequest"}, "output":{"shape":"UpdateOpportunityResponse"}, "errors":[ - {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, @@ -874,7 +874,7 @@ "type":"string", "max":80, "min":0, - "pattern":"^[\\p{L}\\p{N}\\p{P}\\p{Z}]+$" + "sensitive":true }, "ApnPrograms":{ "type":"list", @@ -1301,7 +1301,7 @@ "type":"string", "max":120, "min":1, - 
"pattern":"^[\\p{L}\\p{N}\\p{P}\\p{Z}]+$" + "sensitive":true }, "CompanyWebsiteUrl":{ "type":"string", @@ -1848,7 +1848,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        A list of objects specifying each tag name and value.

        " + "documentation":"

        A map of the key-value pairs of the tag or tags to assign.

        " } } }, @@ -2803,7 +2803,8 @@ "ExpectedCustomerSpendList":{ "type":"list", "member":{"shape":"ExpectedCustomerSpend"}, - "min":1 + "max":10, + "min":0 }, "ExpectedCustomerSpendTargetCompanyString":{ "type":"string", @@ -3372,7 +3373,8 @@ "InvitationMessage":{ "type":"string", "max":255, - "min":1 + "min":1, + "sensitive":true }, "InvitationStatus":{ "type":"string", @@ -3936,10 +3938,7 @@ "shape":"String", "documentation":"

        The token for the next set of results. This value is returned from a previous call.

        " }, - "Sort":{ - "shape":"EngagementSort", - "documentation":"

        An object that specifies the sort order of the results.

        " - } + "Sort":{"shape":"EngagementSort"} } }, "ListEngagementsResponse":{ @@ -4291,7 +4290,8 @@ "MemberCompanyName":{ "type":"string", "max":120, - "min":1 + "min":1, + "sensitive":true }, "MemberPageSize":{ "type":"integer", @@ -5302,7 +5302,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        A list of objects specifying each tag name and value.

        " + "documentation":"

        A map of the key-value pairs of the tag or tags to assign.

        " } } }, @@ -5377,7 +5377,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        A list of objects specifying each tag name and value.

        " + "documentation":"

        A map of the key-value pairs of the tag or tags to assign.

        " } } }, @@ -5541,7 +5541,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

        A map of the key-value pairs of the tag or tags to assign to the resource.

        " + "documentation":"

        A map of the key-value pairs of the tag or tags to assign.

        " } } }, diff --git a/services/paymentcryptography/pom.xml b/services/paymentcryptography/pom.xml index de5ddf738a67..6136d159a56e 100644 --- a/services/paymentcryptography/pom.xml +++ b/services/paymentcryptography/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT paymentcryptography AWS Java SDK :: Services :: Payment Cryptography diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/customization.config b/services/paymentcryptography/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/paymentcryptography/src/main/resources/codegen-resources/customization.config +++ b/services/paymentcryptography/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json b/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json index 5f0227ef03c2..a39d010c55fd 100644 --- a/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json +++ b/services/paymentcryptography/src/main/resources/codegen-resources/service-2.json @@ -54,7 +54,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Creates an Amazon Web Services Payment Cryptography key, a logical representation of a cryptographic key, that is unique in your account and Amazon Web Services Region. You use keys for cryptographic functions such as encryption and decryption.

        In addition to the key material used in cryptographic operations, an Amazon Web Services Payment Cryptography key includes metadata such as the key ARN, key usage, key origin, creation date, description, and key state.

        When you create a key, you specify both immutable and mutable data about the key. The immutable data contains key attributes that define the scope and cryptographic operations that you can perform using the key, for example key class (example: SYMMETRIC_KEY), key algorithm (example: TDES_2KEY), key usage (example: TR31_P0_PIN_ENCRYPTION_KEY) and key modes of use (example: Encrypt). For information about valid combinations of key attributes, see Understanding key attributes in the Amazon Web Services Payment Cryptography User Guide. The mutable data contained within a key includes usage timestamp and key deletion timestamp and can be modified after creation.

        Amazon Web Services Payment Cryptography binds key attributes to keys using key blocks when you store or export them. Amazon Web Services Payment Cryptography stores the key contents wrapped and never stores or transmits them in the clear.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " + "documentation":"

        Creates an Amazon Web Services Payment Cryptography key, a logical representation of a cryptographic key, that is unique in your account and Amazon Web Services Region. You use keys for cryptographic functions such as encryption and decryption.

        In addition to the key material used in cryptographic operations, an Amazon Web Services Payment Cryptography key includes metadata such as the key ARN, key usage, key origin, creation date, description, and key state.

        When you create a key, you specify both immutable and mutable data about the key. The immutable data contains key attributes that define the scope and cryptographic operations that you can perform using the key, for example key class (example: SYMMETRIC_KEY), key algorithm (example: TDES_2KEY), key usage (example: TR31_P0_PIN_ENCRYPTION_KEY) and key modes of use (example: Encrypt). Amazon Web Services Payment Cryptography binds key attributes to keys using key blocks when you store or export them. Amazon Web Services Payment Cryptography stores the key contents wrapped and never stores or transmits them in the clear.

        For information about valid combinations of key attributes, see Understanding key attributes in the Amazon Web Services Payment Cryptography User Guide. The mutable data contained within a key includes usage timestamp and key deletion timestamp and can be modified after creation.

        You can use the CreateKey operation to generate an ECC (Elliptic Curve Cryptography) key pair used for establishing an ECDH (Elliptic Curve Diffie-Hellman) key agreement between two parties. In the ECDH key agreement process, both parties generate their own ECC key pair with key usage K3 and exchange the public keys. Each party then use their private key, the received public key from the other party, and the key derivation parameters including key derivation function, hash algorithm, derivation data, and key algorithm to derive a shared key.

        To maintain the single-use principle of cryptographic keys in payments, ECDH derived keys should not be used for multiple purposes, such as a TR31_P0_PIN_ENCRYPTION_KEY and TR31_K1_KEY_BLOCK_PROTECTION_KEY. When creating ECC key pairs in Amazon Web Services Payment Cryptography you can optionally set the DeriveKeyUsage parameter, which defines the key usage bound to the symmetric key that will be derived using the ECC key pair.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " }, "DeleteAlias":{ "name":"DeleteAlias", @@ -113,7 +113,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Exports a key from Amazon Web Services Payment Cryptography.

        Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ExportKey you can export symmetric keys using either symmetric or asymmetric key exchange mechanisms. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to perform cryptographic operations outside of Amazon Web Services Payment Cryptography

        For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanism. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exchanging keys and are used for initial key exchange such as Key Encryption Key (KEK). After which you can export working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

        The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.

        You can also use ExportKey functionality to generate and export an IPEK (Initial Pin Encryption Key) from Amazon Web Services Payment Cryptography using either TR-31 or TR-34 export key exchange. IPEK is generated from BDK (Base Derivation Key) and ExportDukptInitialKey attribute KSN (KeySerialNumber). The generated IPEK does not persist within Amazon Web Services Payment Cryptography and has to be re-generated each time during export.

        For key exchange using TR-31 or TR-34 key blocks, you can also export optional blocks within the key block header which contain additional attribute information about the key. The KeyVersion within KeyBlockHeaders indicates the version of the key within the key block. Furthermore, KeyExportability within KeyBlockHeaders can be used to further restrict exportability of the key after export from Amazon Web Services Payment Cryptography.

        The OptionalBlocks contain the additional data related to the key. For information on data type that can be included within optional blocks, refer to ASC X9.143-2022.

        Data included in key block headers is signed but transmitted in clear text. Sensitive or confidential information should not be included in optional blocks. Refer to ASC X9.143-2022 standard for information on allowed data type.

        To export initial keys (KEK) or IPEK using TR-34

        Using this operation, you can export initial key using TR-34 asymmetric key exchange. You can only export KEK generated within Amazon Web Services Payment Cryptography. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During key export process, KDH is Amazon Web Services Payment Cryptography which initiates key export and KRD is the user receiving the key.

        To initiate TR-34 key export, the KRD must obtain an export token by calling GetParametersForExport. This operation also generates a key pair for the purpose of key export, signs the key and returns back the signing public key certificate (also known as KDH signing certificate) and root certificate chain. The KDH uses the private key to sign the export payload and the signing public key certificate is provided to KRD to verify the signature. The KRD can import the root certificate into its Hardware Security Module (HSM), as required. The export token and the associated KDH signing certificate expires after 7 days.

        Next the KRD generates a key pair for the purpose of encrypting the KDH key and provides the public key certificate (also known as KRD wrapping certificate) back to KDH. The KRD will also import the root certificate chain into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey. The KDH, Amazon Web Services Payment Cryptography, will use the KRD wrapping certificate to encrypt (wrap) the key under export and signs it with the signing private key to generate a TR-34 WrappedKeyBlock. For more information on TR-34 key export, see section Exporting symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

        Set the following parameters:

        • ExportAttributes: Specify export attributes in case of IPEK export. This parameter is optional for KEK export.

        • ExportKeyIdentifier: The KeyARN of the KEK or BDK (in case of IPEK) under export.

        • KeyMaterial: Use Tr34KeyBlock parameters.

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KRD wrapping key certificate.

        • ExportToken: Obtained from KDH by calling GetParametersForImport.

        • WrappingKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KRD wrapping key Amazon Web Services Payment Cryptography uses for encryption of the TR-34 export payload. This certificate must be signed by the root certificate (CertificateAuthorityPublicKeyIdentifier) imported into Amazon Web Services Payment Cryptography.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the KEK or IPEK as a TR-34 WrappedKeyBlock.

        To export initial keys (KEK) or IPEK using RSA Wrap and Unwrap

        Using this operation, you can export initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate export, generate an asymmetric key pair on the receiving HSM and obtain the public key certificate in PEM format (base64 encoded) for the purpose of wrapping and the root certificate chain. Import the root certificate into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey.

        Next call ExportKey and set the following parameters:

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed wrapping key certificate.

        • KeyMaterial: Set to KeyCryptogram.

        • WrappingKeyCertificate: The public key certificate in PEM format (base64 encoded) obtained by the receiving HSM and signed by the root certificate (CertificateAuthorityPublicKeyIdentifier) imported into Amazon Web Services Payment Cryptography. The receiving HSM uses its private key component to unwrap the WrappedKeyCryptogram.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the WrappedKeyCryptogram.

        To export working keys or IPEK using TR-31

        Using this operation, you can export working keys or IPEK using TR-31 symmetric key exchange. In TR-31, you must use an initial key such as KEK to encrypt or wrap the key under export. To establish a KEK, you can use CreateKey or ImportKey.

        Set the following parameters:

        • ExportAttributes: Specify export attributes in case of IPEK export. This parameter is optional for KEK export.

        • ExportKeyIdentifier: The KeyARN of the KEK or BDK (in case of IPEK) under export.

        • KeyMaterial: Use Tr31KeyBlock parameters.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the working key or IPEK as a TR-31 WrappedKeyBlock.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " + "documentation":"

        Exports a key from Amazon Web Services Payment Cryptography.

        Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ExportKey you can export symmetric keys using either symmetric or asymmetric key exchange mechanisms. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to perform cryptographic operations outside of Amazon Web Services Payment Cryptography.

        For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm, RSA unwrap, and ECDH (Elliptic Curve Diffie-Hellman) key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exchanging keys and are used for initial key exchange such as Key Encryption Key (KEK). After which you can export working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

        PCI requires specific minimum key strength of wrapping keys used to protect the keys being exchanged electronically. These requirements can change when PCI standards are revised. The rules specify that wrapping keys used for transport must be at least as strong as the key being protected. For more information on recommended key strength of wrapping keys and key exchange mechanism, see Importing and exporting keys in the Amazon Web Services Payment Cryptography User Guide.

        You can also use ExportKey functionality to generate and export an IPEK (Initial Pin Encryption Key) from Amazon Web Services Payment Cryptography using either TR-31 or TR-34 export key exchange. IPEK is generated from BDK (Base Derivation Key) and ExportDukptInitialKey attribute KSN (KeySerialNumber). The generated IPEK does not persist within Amazon Web Services Payment Cryptography and has to be re-generated each time during export.

        For key exchange using TR-31 or TR-34 key blocks, you can also export optional blocks within the key block header which contain additional attribute information about the key. The KeyVersion within KeyBlockHeaders indicates the version of the key within the key block. Furthermore, KeyExportability within KeyBlockHeaders can be used to further restrict exportability of the key after export from Amazon Web Services Payment Cryptography.

        The OptionalBlocks contain the additional data related to the key. For information on data type that can be included within optional blocks, refer to ASC X9.143-2022.

        Data included in key block headers is signed but transmitted in clear text. Sensitive or confidential information should not be included in optional blocks. Refer to ASC X9.143-2022 standard for information on allowed data type.

        To export initial keys (KEK) or IPEK using TR-34

        Using this operation, you can export initial key using TR-34 asymmetric key exchange. You can only export KEK generated within Amazon Web Services Payment Cryptography. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During key export process, KDH is Amazon Web Services Payment Cryptography which initiates key export and KRD is the user receiving the key.

        To initiate TR-34 key export, the KRD must obtain an export token by calling GetParametersForExport. This operation also generates a key pair for the purpose of key export, signs the key and returns back the signing public key certificate (also known as KDH signing certificate) and root certificate chain. The KDH uses the private key to sign the export payload and the signing public key certificate is provided to KRD to verify the signature. The KRD can import the root certificate into its Hardware Security Module (HSM), as required. The export token and the associated KDH signing certificate expires after 30 days.

        Next the KRD generates a key pair for the purpose of encrypting the KDH key and provides the public key certificate (also known as KRD wrapping certificate) back to KDH. The KRD will also import the root certificate chain into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey. The KDH, Amazon Web Services Payment Cryptography, will use the KRD wrapping certificate to encrypt (wrap) the key under export and signs it with signing private key to generate a TR-34 WrappedKeyBlock. For more information on TR-34 key export, see section Exporting symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

        Set the following parameters:

        • ExportAttributes: Specify export attributes in case of IPEK export. This parameter is optional for KEK export.

        • ExportKeyIdentifier: The KeyARN of the KEK or BDK (in case of IPEK) under export.

        • KeyMaterial: Use Tr34KeyBlock parameters.

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KRD wrapping key certificate.

        • ExportToken: Obtained from KDH by calling GetParametersForExport.

        • WrappingKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KRD wrapping key Amazon Web Services Payment Cryptography uses for encryption of the TR-34 export payload. This certificate must be signed by the root certificate (CertificateAuthorityPublicKeyIdentifier) imported into Amazon Web Services Payment Cryptography.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the KEK or IPEK as a TR-34 WrappedKeyBlock.

        To export initial keys (KEK) or IPEK using RSA Wrap and Unwrap

        Using this operation, you can export initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate export, generate an asymmetric key pair on the receiving HSM and obtain the public key certificate in PEM format (base64 encoded) for the purpose of wrapping and the root certificate chain. Import the root certificate into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey.

        Next call ExportKey and set the following parameters:

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed wrapping key certificate.

        • KeyMaterial: Set to KeyCryptogram.

        • WrappingKeyCertificate: The public key certificate in PEM format (base64 encoded) obtained by the receiving HSM and signed by the root certificate (CertificateAuthorityPublicKeyIdentifier) imported into Amazon Web Services Payment Cryptography. The receiving HSM uses its private key component to unwrap the WrappedKeyCryptogram.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the WrappedKeyCryptogram.

        To export working keys or IPEK using TR-31

        Using this operation, you can export working keys or IPEK using TR-31 symmetric key exchange. In TR-31, you must use an initial key such as KEK to encrypt or wrap the key under export. To establish a KEK, you can use CreateKey or ImportKey.

        Set the following parameters:

        • ExportAttributes: Specify export attributes in case of IPEK export. This parameter is optional for KEK export.

        • ExportKeyIdentifier: The KeyARN of the KEK or BDK (in case of IPEK) under export.

        • KeyMaterial: Use Tr31KeyBlock parameters.

        To export working keys using ECDH

        You can also use ECDH key agreement to export working keys in a TR-31 keyblock, where the wrapping key is an ECDH derived key.

        To initiate a TR-31 key export using ECDH, both sides must create an ECC key pair with key usage K3 and exchange public key certificates. In Amazon Web Services Payment Cryptography, you can do this by calling CreateKey. If you have not already done so, you must import the CA chain that issued the receiving public key certificate by calling ImportKey with input RootCertificatePublicKey for root CA or TrustedPublicKey for intermediate CA. You can then complete a TR-31 key export by deriving a shared wrapping key using the service ECC key pair, public certificate of your ECC key pair outside of Amazon Web Services Payment Cryptography, and the key derivation parameters including key derivation function, hash algorithm, derivation data, key algorithm.

        • KeyMaterial: Use DiffieHellmanTr31KeyBlock parameters.

        • PrivateKeyIdentifier: The KeyArn of the ECC key pair created within Amazon Web Services Payment Cryptography to derive a shared KEK.

        • PublicKeyCertificate: The public key certificate of the receiving ECC key pair in PEM format (base64 encoded) to derive a shared KEK.

        • CertificateAuthorityPublicKeyIdentifier: The keyARN of the CA that signed the public key certificate of the receiving ECC key pair.

        When this operation is successful, Amazon Web Services Payment Cryptography returns the working key as a TR-31 WrappedKeyBlock, where the wrapping key is the ECDH derived key.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " }, "GetAlias":{ "name":"GetAlias", @@ -169,7 +169,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Gets the export token and the signing key certificate to initiate a TR-34 key export from Amazon Web Services Payment Cryptography.

        The signing key certificate signs the wrapped key under export within the TR-34 key payload. The export token and signing key certificate must be in place and operational before calling ExportKey. The export token expires in 7 days. You can use the same export token to export multiple keys from your service account.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " + "documentation":"

        Gets the export token and the signing key certificate to initiate a TR-34 key export from Amazon Web Services Payment Cryptography.

        The signing key certificate signs the wrapped key under export within the TR-34 key payload. The export token and signing key certificate must be in place and operational before calling ExportKey. The export token expires in 30 days. You can use the same export token to export multiple keys from your service account.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " }, "GetParametersForImport":{ "name":"GetParametersForImport", @@ -189,7 +189,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Gets the import token and the wrapping key certificate in PEM format (base64 encoded) to initiate a TR-34 WrappedKeyBlock or a RSA WrappedKeyCryptogram import into Amazon Web Services Payment Cryptography.

        The wrapping key certificate wraps the key under import. The import token and wrapping key certificate must be in place and operational before calling ImportKey. The import token expires in 7 days. You can use the same import token to import multiple keys into your service account.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " + "documentation":"

        Gets the import token and the wrapping key certificate in PEM format (base64 encoded) to initiate a TR-34 WrappedKeyBlock or a RSA WrappedKeyCryptogram import into Amazon Web Services Payment Cryptography.

        The wrapping key certificate wraps the key under import. The import token and wrapping key certificate must be in place and operational before calling ImportKey. The import token expires in 30 days. You can use the same import token to import multiple keys into your service account.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " }, "GetPublicKeyCertificate":{ "name":"GetPublicKeyCertificate", @@ -227,7 +227,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

        Imports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.

        Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric or asymmetric key exchange mechanisms.

        For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exchanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

        The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.

        You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

        To import a public root key certificate

        Using this operation, you can import the public component (in PEM certificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.

        Set the following parameters:

        • KeyMaterial: RootCertificatePublicKey

        • KeyClass: PUBLIC_KEY

        • KeyModesOfUse: Verify

        • KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE

        • PublicKeyCertificate: The public key certificate in PEM format (base64 encoded) of the private root key under import.

        To import a trusted public key certificate

        The root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:

        • KeyMaterial: TrustedCertificatePublicKey

        • CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey.

        • KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform.

        • PublicKeyCertificate: The trusted public key certificate in PEM format (base64 encoded) under import.

        To import initial keys (KEK or ZMK or similar) using TR-34

        Using this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.

        To initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 7 days.

        Next the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

        Set the following parameters:

        • KeyMaterial: Use Tr34KeyBlock parameters.

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KDH signing key certificate.

        • ImportToken: Obtained from KRD by calling GetParametersForImport.

        • WrappedKeyBlock: The TR-34 wrapped key material from KDH. It contains the KDH key under import, wrapped with KRD wrapping certificate and signed by KDH signing private key. This TR-34 key block is typically generated by the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography.

        • SigningKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KDH signing key generated under the root certificate (CertificateAuthorityPublicKeyIdentifier) imported in Amazon Web Services Payment Cryptography.

        To import initial keys (KEK or ZMK or similar) using RSA Wrap and Unwrap

        Using this operation, you can import initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. This operation also generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate in PEM format (base64 encoded) and its root certificate chain. The import token and associated KRD wrapping certificate expires after 7 days.

        You must trust and install the wrapping certificate and its certificate chain on the sending HSM and use it to wrap the key under export for WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM and provide the ImportToken and KeyAttributes for the key under import.

        To import working keys using TR-31

        Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:

        • KeyMaterial: Use Tr31KeyBlock parameters.

        • WrappedKeyBlock: The TR-31 wrapped key material. It contains the key under import, encrypted using KEK. The TR-31 key block is typically generated by a HSM outside of Amazon Web Services Payment Cryptography.

        • WrappingKeyIdentifier: The KeyArn of the KEK that Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " + "documentation":"

        Imports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.

        Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric or asymmetric key exchange mechanisms.

        For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm, RSA unwrap, and ECDH (Elliptic Curve Diffie-Hellman) key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exchanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

        PCI requires specific minimum key strength of wrapping keys used to protect the keys being exchanged electronically. These requirements can change when PCI standards are revised. The rules specify that wrapping keys used for transport must be at least as strong as the key being protected. For more information on recommended key strength of wrapping keys and key exchange mechanism, see Importing and exporting keys in the Amazon Web Services Payment Cryptography User Guide.

        You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

        To import a public root key certificate

        Using this operation, you can import the public component (in PEM certificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.

        Set the following parameters:

        • KeyMaterial: RootCertificatePublicKey

        • KeyClass: PUBLIC_KEY

        • KeyModesOfUse: Verify

        • KeyUsage: TR31_S0_ASYMMETRIC_KEY_FOR_DIGITAL_SIGNATURE

        • PublicKeyCertificate: The public key certificate in PEM format (base64 encoded) of the private root key under import.

        To import a trusted public key certificate

        The root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:

        • KeyMaterial: TrustedCertificatePublicKey

        • CertificateAuthorityPublicKeyIdentifier: KeyArn of the RootCertificatePublicKey.

        • KeyModesOfUse and KeyUsage: Corresponding to the cryptographic operations such as wrap, sign, or encrypt that you will allow the trusted public key certificate to perform.

        • PublicKeyCertificate: The trusted public key certificate in PEM format (base64 encoded) under import.

        To import initial keys (KEK or ZMK or similar) using TR-34

        Using this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.

        To initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 30 days.

        Next the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

        Set the following parameters:

        • KeyMaterial: Use Tr34KeyBlock parameters.

        • CertificateAuthorityPublicKeyIdentifier: The KeyARN of the certificate chain that signed the KDH signing key certificate.

        • ImportToken: Obtained from KRD by calling GetParametersForImport.

        • WrappedKeyBlock: The TR-34 wrapped key material from KDH. It contains the KDH key under import, wrapped with KRD wrapping certificate and signed by KDH signing private key. This TR-34 key block is typically generated by the KDH Hardware Security Module (HSM) outside of Amazon Web Services Payment Cryptography.

        • SigningKeyCertificate: The public key certificate in PEM format (base64 encoded) of the KDH signing key generated under the root certificate (CertificateAuthorityPublicKeyIdentifier) imported in Amazon Web Services Payment Cryptography.

        To import initial keys (KEK or ZMK or similar) using RSA Wrap and Unwrap

        Using this operation, you can import initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. This operation also generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate in PEM format (base64 encoded) and its root certificate chain. The import token and associated KRD wrapping certificate expires after 30 days.

        You must trust and install the wrapping certificate and its certificate chain on the sending HSM and use it to wrap the key under export for WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM and provide the ImportToken and KeyAttributes for the key under import.

        To import working keys using TR-31

        Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:

        • KeyMaterial: Use Tr31KeyBlock parameters.

        • WrappedKeyBlock: The TR-31 wrapped key material. It contains the key under import, encrypted using KEK. The TR-31 key block is typically generated by a HSM outside of Amazon Web Services Payment Cryptography.

        • WrappingKeyIdentifier: The KeyArn of the KEK that Amazon Web Services Payment Cryptography uses to decrypt or unwrap the key under import.

        To import working keys using ECDH

        You can also use ECDH key agreement to import working keys as a TR-31 keyblock, where the wrapping key is an ECDH derived key.

        To initiate a TR-31 key import using ECDH, both sides must create an ECC key pair with key usage K3 and exchange public key certificates. In Amazon Web Services Payment Cryptography, you can do this by calling CreateKey and then GetPublicKeyCertificate to retrieve its public key certificate. Next, you can then generate a TR-31 WrappedKeyBlock using your own ECC key pair, the public certificate of the service's ECC key pair, and the key derivation parameters including key derivation function, hash algorithm, derivation data, and key algorithm. If you have not already done so, you must import the CA chain that issued the receiving public key certificate by calling ImportKey with input RootCertificatePublicKey for root CA or TrustedPublicKey for intermediate CA. To complete the TR-31 key import, you can use the following parameters. It is important that the ECDH key derivation parameters you use should match those used during import to derive the same shared wrapping key within Amazon Web Services Payment Cryptography.

        • KeyMaterial: Use DiffieHellmanTr31KeyBlock parameters.

        • PrivateKeyIdentifier: The KeyArn of the ECC key pair created within Amazon Web Services Payment Cryptography to derive a shared KEK.

        • PublicKeyCertificate: The public key certificate of the receiving ECC key pair in PEM format (base64 encoded) to derive a shared KEK.

        • CertificateAuthorityPublicKeyIdentifier: The keyARN of the CA that signed the public key certificate of the receiving ECC key pair.

        Cross-account use: This operation can't be used across different Amazon Web Services accounts.

        Related operations:

        " }, "ListAliases":{ "name":"ListAliases", @@ -508,7 +508,7 @@ }, "DeriveKeyUsage":{ "shape":"DeriveKeyUsage", - "documentation":"

        The cryptographic usage of an ECDH derived key as defined in section A.5.2 of the TR-31 spec.

        " + "documentation":"

        The intended cryptographic usage of keys derived from the ECC key pair to be created.

        After creating an ECC key pair, you cannot change the intended cryptographic usage of keys derived from it using ECDH.

        " } } }, @@ -596,10 +596,10 @@ "members":{ "SharedInformation":{ "shape":"SharedInformation", - "documentation":"

        A byte string containing information that binds the ECDH derived key to the two parties involved or to the context of the key.

        It may include details like identities of the two parties deriving the key, context of the operation, session IDs, and optionally a nonce. It must not contain zero bytes, and re-using shared information for multiple ECDH key derivations is not recommended.

        " + "documentation":"

        A string containing information that binds the ECDH derived key to the two parties involved or to the context of the key.

        It may include details like identities of the two parties deriving the key, context of the operation, session IDs, and optionally a nonce. It must not contain zero bytes. It is not recommended to reuse shared information for multiple ECDH key derivations, as it could result in derived key material being the same across different derivations.

        " } }, - "documentation":"

        Derivation data used to derive an ECDH key.

        ", + "documentation":"

        The shared information used when deriving a key using ECDH.

        ", "union":true }, "EvenHexLengthBetween16And32":{ @@ -636,35 +636,35 @@ "members":{ "PrivateKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", - "documentation":"

        The keyARN of the asymmetric ECC key.

        " + "documentation":"

        The keyARN of the asymmetric ECC key created within Amazon Web Services Payment Cryptography.

        " }, "CertificateAuthorityPublicKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", - "documentation":"

        The keyARN of the certificate that signed the client's PublicKeyCertificate.

        " + "documentation":"

        The keyARN of the CA that signed the PublicKeyCertificate for the client's receiving ECC key pair.

        " }, "PublicKeyCertificate":{ "shape":"CertificateType", - "documentation":"

        The client's public key certificate in PEM format (base64 encoded) to use for ECDH key derivation.

        " + "documentation":"

        The public key certificate of the client's receiving ECC key pair, in PEM format (base64 encoded), to use for ECDH key derivation.

        " }, "DeriveKeyAlgorithm":{ "shape":"SymmetricKeyAlgorithm", - "documentation":"

        The key algorithm of the derived ECDH key.

        " + "documentation":"

        The key algorithm of the shared derived ECDH key.

        " }, "KeyDerivationFunction":{ "shape":"KeyDerivationFunction", - "documentation":"

        The key derivation function to use for deriving a key using ECDH.

        " + "documentation":"

        The key derivation function to use when deriving a key using ECDH.

        " }, "KeyDerivationHashAlgorithm":{ "shape":"KeyDerivationHashAlgorithm", - "documentation":"

        The hash type to use for deriving a key using ECDH.

        " + "documentation":"

        The hash type to use when deriving a key using ECDH.

        " }, "DerivationData":{ "shape":"DiffieHellmanDerivationData", - "documentation":"

        Derivation data used to derive an ECDH key.

        " + "documentation":"

        The shared information used when deriving a key using ECDH.

        " }, "KeyBlockHeaders":{"shape":"KeyBlockHeaders"} }, - "documentation":"

        Parameter information for key material export using the asymmetric ECDH key exchange method.

        " + "documentation":"

        Key derivation parameter information for key material export using the asymmetric ECDH key exchange method.

        " }, "ExportDukptInitialKey":{ "type":"structure", @@ -737,7 +737,7 @@ }, "DiffieHellmanTr31KeyBlock":{ "shape":"ExportDiffieHellmanTr31KeyBlock", - "documentation":"

        Parameter information for key material export using the asymmetric ECDH key exchange method.

        " + "documentation":"

        Key derivation parameter information for key material export using the asymmetric ECDH key exchange method.

        " } }, "documentation":"

        Parameter information for key material export from Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

        ", @@ -790,7 +790,7 @@ }, "ExportToken":{ "shape":"ExportTokenId", - "documentation":"

        The export token to initiate key export from Amazon Web Services Payment Cryptography. It also contains the signing key certificate that will sign the wrapped key during TR-34 key block generation. Call GetParametersForExport to receive an export token. It expires after 7 days. You can use the same export token to export multiple keys from the same service account.

        " + "documentation":"

        The export token to initiate key export from Amazon Web Services Payment Cryptography. It also contains the signing key certificate that will sign the wrapped key during TR-34 key block generation. Call GetParametersForExport to receive an export token. It expires after 30 days. You can use the same export token to export multiple keys from the same service account.

        " }, "KeyBlockFormat":{ "shape":"Tr34KeyBlockFormat", @@ -876,7 +876,7 @@ "members":{ "SigningKeyCertificate":{ "shape":"CertificateType", - "documentation":"

        The signing key certificate in PEM format (base64 encoded) of the public key for signature within the TR-34 key block. The certificate expires after 7 days.

        " + "documentation":"

        The signing key certificate in PEM format (base64 encoded) of the public key for signature within the TR-34 key block. The certificate expires after 30 days.

        " }, "SigningKeyCertificateChain":{ "shape":"CertificateType", @@ -888,7 +888,7 @@ }, "ExportToken":{ "shape":"ExportTokenId", - "documentation":"

        The export token to initiate key export from Amazon Web Services Payment Cryptography. The export token expires after 7 days. You can use the same export token to export multiple keys from the same service account.

        " + "documentation":"

        The export token to initiate key export from Amazon Web Services Payment Cryptography. The export token expires after 30 days. You can use the same export token to export multiple keys from the same service account.

        " }, "ParametersValidUntilTimestamp":{ "shape":"Timestamp", @@ -925,7 +925,7 @@ "members":{ "WrappingKeyCertificate":{ "shape":"CertificateType", - "documentation":"

        The wrapping key certificate in PEM format (base64 encoded) of the wrapping key for use within the TR-34 key block. The certificate expires in 7 days.

        " + "documentation":"

        The wrapping key certificate in PEM format (base64 encoded) of the wrapping key for use within the TR-34 key block. The certificate expires in 30 days.

        " }, "WrappingKeyCertificateChain":{ "shape":"CertificateType", @@ -937,7 +937,7 @@ }, "ImportToken":{ "shape":"ImportTokenId", - "documentation":"

        The import token to initiate key import into Amazon Web Services Payment Cryptography. The import token expires after 7 days. You can use the same import token to import multiple keys to the same service account.

        " + "documentation":"

        The import token to initiate key import into Amazon Web Services Payment Cryptography. The import token expires after 30 days. You can use the same import token to import multiple keys to the same service account.

        " }, "ParametersValidUntilTimestamp":{ "shape":"Timestamp", @@ -993,38 +993,38 @@ "members":{ "PrivateKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", - "documentation":"

        The keyARN of the asymmetric ECC key.

        " + "documentation":"

        The keyARN of the asymmetric ECC key created within Amazon Web Services Payment Cryptography.

        " }, "CertificateAuthorityPublicKeyIdentifier":{ "shape":"KeyArnOrKeyAliasType", - "documentation":"

        The keyARN of the certificate that signed the client's PublicKeyCertificate.

        " + "documentation":"

        The keyARN of the CA that signed the PublicKeyCertificate for the client's receiving ECC key pair.

        " }, "PublicKeyCertificate":{ "shape":"CertificateType", - "documentation":"

        The client's public key certificate in PEM format (base64 encoded) to use for ECDH key derivation.

        " + "documentation":"

        The public key certificate of the client's receiving ECC key pair, in PEM format (base64 encoded), to use for ECDH key derivation.

        " }, "DeriveKeyAlgorithm":{ "shape":"SymmetricKeyAlgorithm", - "documentation":"

        The key algorithm of the derived ECDH key.

        " + "documentation":"

        The key algorithm of the shared derived ECDH key.

        " }, "KeyDerivationFunction":{ "shape":"KeyDerivationFunction", - "documentation":"

        The key derivation function to use for deriving a key using ECDH.

        " + "documentation":"

        The key derivation function to use when deriving a key using ECDH.

        " }, "KeyDerivationHashAlgorithm":{ "shape":"KeyDerivationHashAlgorithm", - "documentation":"

        The hash type to use for deriving a key using ECDH.

        " + "documentation":"

        The hash type to use when deriving a key using ECDH.

        " }, "DerivationData":{ "shape":"DiffieHellmanDerivationData", - "documentation":"

        Derivation data used to derive an ECDH key.

        " + "documentation":"

        The shared information used when deriving a key using ECDH.

        " }, "WrappedKeyBlock":{ "shape":"Tr31WrappedKeyBlock", "documentation":"

        The ECDH wrapped key block to import.

        " } }, - "documentation":"

        Parameter information for key material import using the asymmetric ECDH key exchange method.

        " + "documentation":"

        Key derivation parameter information for key material import using the asymmetric ECDH key exchange method.

        " }, "ImportKeyCryptogram":{ "type":"structure", @@ -1046,7 +1046,7 @@ }, "ImportToken":{ "shape":"ImportTokenId", - "documentation":"

        The import token that initiates key import using the asymmetric RSA wrap and unwrap key exchange method into AWS Payment Cryptography. It expires after 7 days. You can use the same import token to import multiple keys to the same service account.

        " + "documentation":"

        The import token that initiates key import using the asymmetric RSA wrap and unwrap key exchange method into AWS Payment Cryptography. It expires after 30 days. You can use the same import token to import multiple keys to the same service account.

        " }, "WrappingSpec":{ "shape":"WrappingKeySpec", @@ -1102,7 +1102,7 @@ }, "DiffieHellmanTr31KeyBlock":{ "shape":"ImportDiffieHellmanTr31KeyBlock", - "documentation":"

        Parameter information for key material import using the asymmetric ECDH key exchange method.

        " + "documentation":"

        Key derivation parameter information for key material import using the asymmetric ECDH key exchange method.

        " } }, "documentation":"

        Parameter information for key material import into Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

        ", @@ -1160,7 +1160,7 @@ }, "ImportToken":{ "shape":"ImportTokenId", - "documentation":"

        The import token that initiates key import using the asymmetric TR-34 key exchange method into Amazon Web Services Payment Cryptography. It expires after 7 days. You can use the same import token to import multiple keys to the same service account.

        " + "documentation":"

        The import token that initiates key import using the asymmetric TR-34 key exchange method into Amazon Web Services Payment Cryptography. It expires after 30 days. You can use the same import token to import multiple keys to the same service account.

        " }, "WrappedKeyBlock":{ "shape":"Tr34WrappedKeyBlock", @@ -1267,6 +1267,10 @@ "AES_128", "AES_192", "AES_256", + "HMAC_SHA256", + "HMAC_SHA384", + "HMAC_SHA512", + "HMAC_SHA224", "RSA_2048", "RSA_3072", "RSA_4096", @@ -1344,7 +1348,8 @@ "type":"string", "enum":[ "CMAC", - "ANSI_X9_24" + "ANSI_X9_24", + "HMAC" ] }, "KeyClass":{ @@ -1782,7 +1787,11 @@ "TDES_3KEY", "AES_128", "AES_192", - "AES_256" + "AES_256", + "HMAC_SHA256", + "HMAC_SHA384", + "HMAC_SHA512", + "HMAC_SHA224" ] }, "Tag":{ diff --git a/services/paymentcryptographydata/pom.xml b/services/paymentcryptographydata/pom.xml index ba96db5e6ff5..12c8033c8ad3 100644 --- a/services/paymentcryptographydata/pom.xml +++ b/services/paymentcryptographydata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT paymentcryptographydata AWS Java SDK :: Services :: Payment Cryptography Data diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config b/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json index ae9a8ec440ee..03b7aaf5d529 100644 --- a/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json +++ b/services/paymentcryptographydata/src/main/resources/codegen-resources/service-2.json @@ -480,7 +480,7 @@ }, "CipherTextType":{ "type":"string", - "max":4096, + "max":4224, "min":2, 
"pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", "sensitive":true @@ -666,7 +666,7 @@ ], "members":{ "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", + "shape":"HexLength16Or20Or24", "documentation":"

        The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

        " }, "DukptDerivationType":{ @@ -681,7 +681,7 @@ "required":["KeySerialNumber"], "members":{ "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", + "shape":"HexLength16Or20Or24", "documentation":"

        The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

        " }, "DukptKeyDerivationType":{ @@ -710,7 +710,7 @@ "required":["KeySerialNumber"], "members":{ "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", + "shape":"HexLength16Or20Or24", "documentation":"

        The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

        " }, "Mode":{ @@ -1318,11 +1318,11 @@ "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", "sensitive":true }, - "HexLengthBetween10And24":{ + "HexLength16Or20Or24":{ "type":"string", "max":24, - "min":10, - "pattern":"[0-9a-fA-F]+" + "min":16, + "pattern":"(?:[0-9a-fA-F]{16}|[0-9a-fA-F]{20}|[0-9a-fA-F]{24})" }, "HexLengthBetween2And4":{ "type":"string", @@ -1547,7 +1547,8 @@ "type":"string", "enum":[ "CMAC", - "ANSI_X9_24" + "ANSI_X9_24", + "HMAC" ] }, "KeyDerivationFunction":{ @@ -1585,7 +1586,7 @@ ], "members":{ "KeySerialNumber":{ - "shape":"HexLengthBetween10And24", + "shape":"HexLength16Or20Or24", "documentation":"

        The unique identifier known as Key Serial Number (KSN) that comes from an encrypting device using DUKPT encryption method. The KSN is derived from the encrypting device unique identifier and an internal transaction counter.

        " }, "DukptKeyVariant":{ @@ -1851,14 +1852,14 @@ }, "PlainTextOutputType":{ "type":"string", - "max":4096, + "max":4224, "min":2, "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", "sensitive":true }, "PlainTextType":{ "type":"string", - "max":4064, + "max":4096, "min":2, "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+", "sensitive":true @@ -2177,7 +2178,11 @@ "TDES_3KEY", "AES_128", "AES_192", - "AES_256" + "AES_256", + "HMAC_SHA256", + "HMAC_SHA384", + "HMAC_SHA512", + "HMAC_SHA224" ] }, "ThrottlingException":{ diff --git a/services/pcaconnectorad/pom.xml b/services/pcaconnectorad/pom.xml index 8f58e0494534..12bdf1808c9e 100644 --- a/services/pcaconnectorad/pom.xml +++ b/services/pcaconnectorad/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pcaconnectorad AWS Java SDK :: Services :: Pca Connector Ad diff --git a/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config b/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config +++ b/services/pcaconnectorad/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pcaconnectorscep/pom.xml b/services/pcaconnectorscep/pom.xml index 790b8b2bb844..e98a84005ea7 100644 --- a/services/pcaconnectorscep/pom.xml +++ b/services/pcaconnectorscep/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pcaconnectorscep AWS Java SDK :: Services :: Pca Connector Scep diff --git a/services/pcaconnectorscep/src/main/resources/codegen-resources/customization.config b/services/pcaconnectorscep/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- 
a/services/pcaconnectorscep/src/main/resources/codegen-resources/customization.config +++ b/services/pcaconnectorscep/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/pcs/pom.xml b/services/pcs/pom.xml index 20ba3b7db4ee..b4f6a80c9ed6 100644 --- a/services/pcs/pom.xml +++ b/services/pcs/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pcs AWS Java SDK :: Services :: PCS diff --git a/services/pcs/src/main/resources/codegen-resources/customization.config b/services/pcs/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/pcs/src/main/resources/codegen-resources/customization.config +++ b/services/pcs/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/pcs/src/main/resources/codegen-resources/service-2.json b/services/pcs/src/main/resources/codegen-resources/service-2.json index 8c3df15d1d04..59e2cc94da33 100644 --- a/services/pcs/src/main/resources/codegen-resources/service-2.json +++ b/services/pcs/src/main/resources/codegen-resources/service-2.json @@ -264,7 +264,7 @@ {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

        This API action isn't intended for you to use.

        Amazon Web Services PCS uses this API action to register the compute nodes it launches in your account.

        " + "documentation":"

        This API action isn't intended for you to use.

        Amazon Web Services PCS uses this API action to register the compute nodes it launches in your account.

        " }, "TagResource":{ "name":"TagResource", @@ -344,6 +344,55 @@ "documentation":"

        You don't have permission to perform the action.

        Examples

        • The launch template instance profile doesn't pass iam:PassRole verification.

        • There is a mismatch between the account ID and cluster ID.

        • The cluster ID doesn't exist.

        • The EC2 instance isn't present.

        ", "exception":true }, + "Accounting":{ + "type":"structure", + "required":["mode"], + "members":{ + "mode":{ + "shape":"AccountingMode", + "documentation":"

        The default value for mode is STANDARD. A value of STANDARD means Slurm accounting is enabled.

        " + }, + "defaultPurgeTimeInDays":{ + "shape":"AccountingDefaultPurgeTimeInDaysInteger", + "documentation":"

        The default value for all purge settings for slurmdbd.conf. For more information, see the slurmdbd.conf documentation at SchedMD.

        The default value for defaultPurgeTimeInDays is -1.

        A value of -1 means there is no purge time and records persist as long as the cluster exists.

        0 isn't a valid value.

        " + } + }, + "documentation":"

        The accounting configuration includes configurable settings for Slurm accounting. It's a property of the ClusterSlurmConfiguration object.

        " + }, + "AccountingDefaultPurgeTimeInDaysInteger":{ + "type":"integer", + "box":true, + "max":10000, + "min":-1 + }, + "AccountingMode":{ + "type":"string", + "enum":[ + "STANDARD", + "NONE" + ] + }, + "AccountingRequest":{ + "type":"structure", + "required":["mode"], + "members":{ + "mode":{ + "shape":"AccountingMode", + "documentation":"

        The default value for mode is STANDARD. A value of STANDARD means Slurm accounting is enabled.

        " + }, + "defaultPurgeTimeInDays":{ + "shape":"AccountingRequestDefaultPurgeTimeInDaysInteger", + "documentation":"

        The default value for all purge settings for slurmdbd.conf. For more information, see the slurmdbd.conf documentation at SchedMD.

        The default value for defaultPurgeTimeInDays is -1.

        A value of -1 means there is no purge time and records persist as long as the cluster exists.

        0 isn't a valid value.

        " + } + }, + "documentation":"

        The accounting configuration includes configurable settings for Slurm accounting. It's a property of the ClusterSlurmConfiguration object.

        " + }, + "AccountingRequestDefaultPurgeTimeInDaysInteger":{ + "type":"integer", + "box":true, + "max":10000, + "min":-1 + }, "AmiId":{ "type":"string", "pattern":"ami-[a-z0-9]+" @@ -352,7 +401,7 @@ "type":"string", "max":1011, "min":1, - "pattern":"arn:aws*:pcs:.*:[0-9]{12}:.*/[a-z0-9_\\-]+" + "pattern":"arn:aws.*:pcs:.*:[0-9]{12}:.*/[a-z0-9_\\-]+" }, "BootstrapId":{ "type":"string", @@ -388,7 +437,7 @@ }, "status":{ "shape":"ClusterStatus", - "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        " + "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "createdAt":{ "shape":"SyntheticTimestamp_date_time", @@ -447,6 +496,10 @@ "authKey":{ "shape":"SlurmAuthKey", "documentation":"

        The shared Slurm key for authentication, also known as the cluster secret.

        " + }, + "accounting":{ + "shape":"Accounting", + "documentation":"

        The accounting configuration includes configurable settings for Slurm accounting.

        " } }, "documentation":"

        Additional options related to the Slurm scheduler.

        " @@ -461,6 +514,10 @@ "slurmCustomSettings":{ "shape":"SlurmCustomSettings", "documentation":"

        Additional Slurm-specific configuration that directly maps to Slurm settings.

        " + }, + "accounting":{ + "shape":"AccountingRequest", + "documentation":"

        The accounting configuration includes configurable settings for Slurm accounting.

        " } }, "documentation":"

        Additional options related to the Slurm scheduler.

        " @@ -484,7 +541,9 @@ "DELETING", "CREATE_FAILED", "DELETE_FAILED", - "UPDATE_FAILED" + "UPDATE_FAILED", + "SUSPENDING", + "SUSPENDED" ] }, "ClusterSummary":{ @@ -520,7 +579,7 @@ }, "status":{ "shape":"ClusterStatus", - "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        " + "documentation":"

        The provisioning status of the cluster.

        The provisioning status doesn't indicate the overall health of the cluster.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListClusters API action.

        " @@ -568,7 +627,7 @@ }, "status":{ "shape":"ComputeNodeGroupStatus", - "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        " + "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "amiId":{ "shape":"AmiId", @@ -659,7 +718,9 @@ "CREATE_FAILED", "DELETE_FAILED", "UPDATE_FAILED", - "DELETED" + "DELETED", + "SUSPENDING", + "SUSPENDED" ] }, "ComputeNodeGroupSummary":{ @@ -700,7 +761,7 @@ }, "status":{ "shape":"ComputeNodeGroupStatus", - "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        " + "documentation":"

        The provisioning status of the compute node group.

        The provisioning status doesn't indicate the overall health of the compute node group.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListComputeNodeGroups API action.

        " @@ -1107,7 +1168,7 @@ }, "InstanceProfileArn":{ "type":"string", - "pattern":"arn:aws([a-zA-Z-]{0,10})?:iam::[0-9]{12}:instance-profile/.{1,128}" + "pattern":"arn:aws([a-zA-Z-]{0,10})?:iam::[0-9]{12}:instance-profile/[/\\w+=,.@-]{1,128}" }, "Integer":{ "type":"integer", @@ -1314,7 +1375,7 @@ }, "status":{ "shape":"QueueStatus", - "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        " + "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " }, "computeNodeGroupConfigurations":{ "shape":"ComputeNodeGroupConfigurationList", @@ -1350,7 +1411,9 @@ "DELETING", "CREATE_FAILED", "DELETE_FAILED", - "UPDATE_FAILED" + "UPDATE_FAILED", + "SUSPENDING", + "SUSPENDED" ] }, "QueueSummary":{ @@ -1391,7 +1454,7 @@ }, "status":{ "shape":"QueueStatus", - "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        " + "documentation":"

        The provisioning status of the queue.

        The provisioning status doesn't indicate the overall health of the queue.

        The resource enters the SUSPENDING and SUSPENDED states when the scheduler is beyond end of life and we have suspended the cluster. When in these states, you can't use the cluster. The cluster controller is down and all compute instances are terminated. The resources still count toward your service quotas. You can delete a resource if its status is SUSPENDED. For more information, see Frequently asked questions about Slurm versions in PCS in the PCS User Guide.

        " } }, "documentation":"

        The object returned by the ListQueues API action.

        " @@ -1542,7 +1605,7 @@ }, "version":{ "shape":"String", - "documentation":"

        The version of the specified scheduling software that Amazon Web Services PCS uses to manage cluster scaling and job scheduling. For more information, see Slurm versions in Amazon Web Services PCS in the Amazon Web Services PCS User Guide.

        Valid Values: 23.11 | 24.05

        " + "documentation":"

        The version of the specified scheduling software that Amazon Web Services PCS uses to manage cluster scaling and job scheduling. For more information, see Slurm versions in Amazon Web Services PCS in the Amazon Web Services PCS User Guide.

        Valid Values: 23.11 | 24.05 | 24.11

        " } }, "documentation":"

        The cluster management and job scheduling software associated with the cluster.

        " @@ -1560,7 +1623,7 @@ }, "version":{ "shape":"String", - "documentation":"

        The version of the specified scheduling software that Amazon Web Services PCS uses to manage cluster scaling and job scheduling. For more information, see Slurm versions in Amazon Web Services PCS in the Amazon Web Services PCS User Guide.

        Valid Values: 23.11 | 24.05

        " + "documentation":"

        The version of the specified scheduling software that Amazon Web Services PCS uses to manage cluster scaling and job scheduling. For more information, see Slurm versions in Amazon Web Services PCS in the Amazon Web Services PCS User Guide.

        Valid Values: 23.11 | 24.05 | 24.11

        " } }, "documentation":"

        The cluster management and job scheduling software associated with the cluster.

        " diff --git a/services/personalize/pom.xml b/services/personalize/pom.xml index 9ba4e13c5f2b..cb589a147756 100644 --- a/services/personalize/pom.xml +++ b/services/personalize/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT personalize AWS Java SDK :: Services :: Personalize diff --git a/services/personalize/src/main/resources/codegen-resources/customization.config b/services/personalize/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/personalize/src/main/resources/codegen-resources/customization.config +++ b/services/personalize/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/personalizeevents/pom.xml b/services/personalizeevents/pom.xml index 4183cf9da8cc..f25ef1651497 100644 --- a/services/personalizeevents/pom.xml +++ b/services/personalizeevents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT personalizeevents AWS Java SDK :: Services :: Personalize Events diff --git a/services/personalizeevents/src/main/resources/codegen-resources/customization.config b/services/personalizeevents/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/personalizeevents/src/main/resources/codegen-resources/customization.config +++ b/services/personalizeevents/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/personalizeruntime/pom.xml b/services/personalizeruntime/pom.xml index f958897c59a4..c8c589806e6e 100644 --- a/services/personalizeruntime/pom.xml +++ b/services/personalizeruntime/pom.xml @@ -21,7 
+21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT personalizeruntime AWS Java SDK :: Services :: Personalize Runtime diff --git a/services/personalizeruntime/src/main/resources/codegen-resources/customization.config b/services/personalizeruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/personalizeruntime/src/main/resources/codegen-resources/customization.config +++ b/services/personalizeruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pi/pom.xml b/services/pi/pom.xml index c4223890b15d..f245b4f6376b 100644 --- a/services/pi/pom.xml +++ b/services/pi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pi AWS Java SDK :: Services :: PI diff --git a/services/pi/src/main/resources/codegen-resources/customization.config b/services/pi/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/pi/src/main/resources/codegen-resources/customization.config +++ b/services/pi/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pinpoint/pom.xml b/services/pinpoint/pom.xml index 7b0a850f2aab..4483fbfcf376 100644 --- a/services/pinpoint/pom.xml +++ b/services/pinpoint/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pinpoint AWS Java SDK :: Services :: Amazon Pinpoint diff --git a/services/pinpoint/src/main/resources/codegen-resources/customization.config b/services/pinpoint/src/main/resources/codegen-resources/customization.config index b02a82f89fc1..b62371178863 100644 --- 
a/services/pinpoint/src/main/resources/codegen-resources/customization.config +++ b/services/pinpoint/src/main/resources/codegen-resources/customization.config @@ -6,6 +6,5 @@ "__EndpointTypesElement": "EndpointTypesElement" }, "underscoresInNameBehavior": "ALLOW", - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pinpointemail/pom.xml b/services/pinpointemail/pom.xml index 02e6232bd41b..0920d09a64d7 100644 --- a/services/pinpointemail/pom.xml +++ b/services/pinpointemail/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pinpointemail AWS Java SDK :: Services :: Pinpoint Email diff --git a/services/pinpointemail/src/main/resources/codegen-resources/customization.config b/services/pinpointemail/src/main/resources/codegen-resources/customization.config index de8948661468..3324fc23dfb2 100644 --- a/services/pinpointemail/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointemail/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ "listDeliverabilityTestReports", "listEmailIdentities" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pinpointsmsvoice/pom.xml b/services/pinpointsmsvoice/pom.xml index d4f2b00e9b40..bbb20111295f 100644 --- a/services/pinpointsmsvoice/pom.xml +++ b/services/pinpointsmsvoice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pinpointsmsvoice AWS Java SDK :: Services :: Pinpoint SMS Voice diff --git a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config index 2b8577ea6263..ba02b96b8737 100644 --- a/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config +++ 
b/services/pinpointsmsvoice/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "excludedSimpleMethods": [ "listConfigurationSets" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pinpointsmsvoicev2/pom.xml b/services/pinpointsmsvoicev2/pom.xml index 1bf1fff2239a..88cb51ebc779 100644 --- a/services/pinpointsmsvoicev2/pom.xml +++ b/services/pinpointsmsvoicev2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pinpointsmsvoicev2 AWS Java SDK :: Services :: Pinpoint SMS Voice V2 diff --git a/services/pinpointsmsvoicev2/src/main/resources/codegen-resources/customization.config b/services/pinpointsmsvoicev2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/pinpointsmsvoicev2/src/main/resources/codegen-resources/customization.config +++ b/services/pinpointsmsvoicev2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pipes/pom.xml b/services/pipes/pom.xml index 723953fcb434..b300d9c04b3a 100644 --- a/services/pipes/pom.xml +++ b/services/pipes/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT pipes AWS Java SDK :: Services :: Pipes diff --git a/services/pipes/src/main/resources/codegen-resources/customization.config b/services/pipes/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/pipes/src/main/resources/codegen-resources/customization.config +++ b/services/pipes/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true 
} diff --git a/services/polly/pom.xml b/services/polly/pom.xml index ddbdd180c352..7df87b20dc0d 100644 --- a/services/polly/pom.xml +++ b/services/polly/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT polly AWS Java SDK :: Services :: Amazon Polly diff --git a/services/polly/src/main/resources/codegen-resources/customization.config b/services/polly/src/main/resources/codegen-resources/customization.config index 1ab268a60af4..a32ecb08f66a 100644 --- a/services/polly/src/main/resources/codegen-resources/customization.config +++ b/services/polly/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "listLexicons", "listSpeechSynthesisTasks" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/pom.xml b/services/pom.xml index 4a8af04b3a61..9179a6cc1a0b 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT services AWS Java SDK :: Services @@ -320,7 +320,6 @@ redshiftserverless rolesanywhere licensemanagerusersubscriptions - privatenetworks supportapp controltower iotfleetwise @@ -425,6 +424,12 @@ iotmanagedintegrations gameliftstreams ssmguiconnect + evs + mpa + aiops + workspacesinstances + keyspacesstreams + odb The AWS Java SDK services https://aws.amazon.com/sdkforjava diff --git a/services/pricing/pom.xml b/services/pricing/pom.xml index 25ae40557ad8..246e1b0dbef9 100644 --- a/services/pricing/pom.xml +++ b/services/pricing/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 pricing diff --git a/services/pricing/src/main/resources/codegen-resources/customization.config b/services/pricing/src/main/resources/codegen-resources/customization.config index ed8dca4b229c..9cf3a85600aa 100644 --- 
a/services/pricing/src/main/resources/codegen-resources/customization.config +++ b/services/pricing/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "excludedSimpleMethods": [ "getProducts" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/privatenetworks/src/main/resources/codegen-resources/customization.config b/services/privatenetworks/src/main/resources/codegen-resources/customization.config deleted file mode 100644 index 6bc46bc3c310..000000000000 --- a/services/privatenetworks/src/main/resources/codegen-resources/customization.config +++ /dev/null @@ -1,4 +0,0 @@ -{ - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true -} diff --git a/services/privatenetworks/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/privatenetworks/src/main/resources/codegen-resources/endpoint-rule-set.json deleted file mode 100644 index 13a5a5820511..000000000000 --- a/services/privatenetworks/src/main/resources/codegen-resources/endpoint-rule-set.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://private-networks-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://private-networks-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://private-networks.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://private-networks.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of 
file diff --git a/services/privatenetworks/src/main/resources/codegen-resources/paginators-1.json b/services/privatenetworks/src/main/resources/codegen-resources/paginators-1.json deleted file mode 100644 index 8b7d279c188c..000000000000 --- a/services/privatenetworks/src/main/resources/codegen-resources/paginators-1.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "pagination": { - "ListDeviceIdentifiers": { - "input_token": "startToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "deviceIdentifiers" - }, - "ListNetworkResources": { - "input_token": "startToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "networkResources" - }, - "ListNetworkSites": { - "input_token": "startToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "networkSites" - }, - "ListNetworks": { - "input_token": "startToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "networks" - }, - "ListOrders": { - "input_token": "startToken", - "output_token": "nextToken", - "limit_key": "maxResults", - "result_key": "orders" - } - } -} diff --git a/services/privatenetworks/src/main/resources/codegen-resources/service-2.json b/services/privatenetworks/src/main/resources/codegen-resources/service-2.json deleted file mode 100644 index daf96e232b8b..000000000000 --- a/services/privatenetworks/src/main/resources/codegen-resources/service-2.json +++ /dev/null @@ -1,2182 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2021-12-03", - "endpointPrefix":"private-networks", - "jsonVersion":"1.1", - "protocol":"rest-json", - "serviceFullName":"AWS Private 5G", - "serviceId":"PrivateNetworks", - "signatureVersion":"v4", - "signingName":"private-networks", - "uid":"privatenetworks-2021-12-03" - }, - "operations":{ - "AcknowledgeOrderReceipt":{ - "name":"AcknowledgeOrderReceipt", - "http":{ - "method":"POST", - "requestUri":"/v1/orders/acknowledge", - "responseCode":200 - }, - 
"input":{"shape":"AcknowledgeOrderReceiptRequest"}, - "output":{"shape":"AcknowledgeOrderReceiptResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Acknowledges that the specified network order was received.

        " - }, - "ActivateDeviceIdentifier":{ - "name":"ActivateDeviceIdentifier", - "http":{ - "method":"POST", - "requestUri":"/v1/device-identifiers/activate", - "responseCode":200 - }, - "input":{"shape":"ActivateDeviceIdentifierRequest"}, - "output":{"shape":"ActivateDeviceIdentifierResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Activates the specified device identifier.

        ", - "idempotent":true - }, - "ActivateNetworkSite":{ - "name":"ActivateNetworkSite", - "http":{ - "method":"POST", - "requestUri":"/v1/network-sites/activate", - "responseCode":200 - }, - "input":{"shape":"ActivateNetworkSiteRequest"}, - "output":{"shape":"ActivateNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Activates the specified network site.

        ", - "idempotent":true - }, - "ConfigureAccessPoint":{ - "name":"ConfigureAccessPoint", - "http":{ - "method":"POST", - "requestUri":"/v1/network-resources/configure", - "responseCode":200 - }, - "input":{"shape":"ConfigureAccessPointRequest"}, - "output":{"shape":"ConfigureAccessPointResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Configures the specified network resource.

        Use this action to specify the geographic position of the hardware. You must provide Certified Professional Installer (CPI) credentials in the request so that we can obtain spectrum grants. For more information, see Radio units in the Amazon Web Services Private 5G User Guide.

        ", - "idempotent":true - }, - "CreateNetwork":{ - "name":"CreateNetwork", - "http":{ - "method":"POST", - "requestUri":"/v1/networks", - "responseCode":200 - }, - "input":{"shape":"CreateNetworkRequest"}, - "output":{"shape":"CreateNetworkResponse"}, - "errors":[ - {"shape":"LimitExceededException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Creates a network.

        ", - "idempotent":true - }, - "CreateNetworkSite":{ - "name":"CreateNetworkSite", - "http":{ - "method":"POST", - "requestUri":"/v1/network-sites", - "responseCode":200 - }, - "input":{"shape":"CreateNetworkSiteRequest"}, - "output":{"shape":"CreateNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Creates a network site.

        ", - "idempotent":true - }, - "DeactivateDeviceIdentifier":{ - "name":"DeactivateDeviceIdentifier", - "http":{ - "method":"POST", - "requestUri":"/v1/device-identifiers/deactivate", - "responseCode":200 - }, - "input":{"shape":"DeactivateDeviceIdentifierRequest"}, - "output":{"shape":"DeactivateDeviceIdentifierResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Deactivates the specified device identifier.

        ", - "idempotent":true - }, - "DeleteNetwork":{ - "name":"DeleteNetwork", - "http":{ - "method":"DELETE", - "requestUri":"/v1/networks/{networkArn}", - "responseCode":200 - }, - "input":{"shape":"DeleteNetworkRequest"}, - "output":{"shape":"DeleteNetworkResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Deletes the specified network. You must delete network sites before you delete the network. For more information, see DeleteNetworkSite in the API Reference for Amazon Web Services Private 5G.

        ", - "idempotent":true - }, - "DeleteNetworkSite":{ - "name":"DeleteNetworkSite", - "http":{ - "method":"DELETE", - "requestUri":"/v1/network-sites/{networkSiteArn}", - "responseCode":200 - }, - "input":{"shape":"DeleteNetworkSiteRequest"}, - "output":{"shape":"DeleteNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Deletes the specified network site. Return the hardware after you delete the network site. You are responsible for minimum charges. For more information, see Hardware returns in the Amazon Web Services Private 5G User Guide.

        ", - "idempotent":true - }, - "GetDeviceIdentifier":{ - "name":"GetDeviceIdentifier", - "http":{ - "method":"GET", - "requestUri":"/v1/device-identifiers/{deviceIdentifierArn}", - "responseCode":200 - }, - "input":{"shape":"GetDeviceIdentifierRequest"}, - "output":{"shape":"GetDeviceIdentifierResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Gets the specified device identifier.

        " - }, - "GetNetwork":{ - "name":"GetNetwork", - "http":{ - "method":"GET", - "requestUri":"/v1/networks/{networkArn}", - "responseCode":200 - }, - "input":{"shape":"GetNetworkRequest"}, - "output":{"shape":"GetNetworkResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Gets the specified network.

        " - }, - "GetNetworkResource":{ - "name":"GetNetworkResource", - "http":{ - "method":"GET", - "requestUri":"/v1/network-resources/{networkResourceArn}", - "responseCode":200 - }, - "input":{"shape":"GetNetworkResourceRequest"}, - "output":{"shape":"GetNetworkResourceResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Gets the specified network resource.

        " - }, - "GetNetworkSite":{ - "name":"GetNetworkSite", - "http":{ - "method":"GET", - "requestUri":"/v1/network-sites/{networkSiteArn}", - "responseCode":200 - }, - "input":{"shape":"GetNetworkSiteRequest"}, - "output":{"shape":"GetNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Gets the specified network site.

        " - }, - "GetOrder":{ - "name":"GetOrder", - "http":{ - "method":"GET", - "requestUri":"/v1/orders/{orderArn}", - "responseCode":200 - }, - "input":{"shape":"GetOrderRequest"}, - "output":{"shape":"GetOrderResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Gets the specified order.

        " - }, - "ListDeviceIdentifiers":{ - "name":"ListDeviceIdentifiers", - "http":{ - "method":"POST", - "requestUri":"/v1/device-identifiers/list", - "responseCode":200 - }, - "input":{"shape":"ListDeviceIdentifiersRequest"}, - "output":{"shape":"ListDeviceIdentifiersResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Lists device identifiers. Add filters to your request to return a more specific list of results. Use filters to match the Amazon Resource Name (ARN) of an order, the status of device identifiers, or the ARN of the traffic group.

        If you specify multiple filters, filters are joined with an OR, and the request returns results that match all of the specified filters.

        " - }, - "ListNetworkResources":{ - "name":"ListNetworkResources", - "http":{ - "method":"POST", - "requestUri":"/v1/network-resources", - "responseCode":200 - }, - "input":{"shape":"ListNetworkResourcesRequest"}, - "output":{"shape":"ListNetworkResourcesResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Lists network resources. Add filters to your request to return a more specific list of results. Use filters to match the Amazon Resource Name (ARN) of an order or the status of network resources.

        If you specify multiple filters, filters are joined with an OR, and the request returns results that match all of the specified filters.

        " - }, - "ListNetworkSites":{ - "name":"ListNetworkSites", - "http":{ - "method":"POST", - "requestUri":"/v1/network-sites/list", - "responseCode":200 - }, - "input":{"shape":"ListNetworkSitesRequest"}, - "output":{"shape":"ListNetworkSitesResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Lists network sites. Add filters to your request to return a more specific list of results. Use filters to match the status of the network site.

        " - }, - "ListNetworks":{ - "name":"ListNetworks", - "http":{ - "method":"POST", - "requestUri":"/v1/networks/list", - "responseCode":200 - }, - "input":{"shape":"ListNetworksRequest"}, - "output":{"shape":"ListNetworksResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Lists networks. Add filters to your request to return a more specific list of results. Use filters to match the status of the network.

        " - }, - "ListOrders":{ - "name":"ListOrders", - "http":{ - "method":"POST", - "requestUri":"/v1/orders/list", - "responseCode":200 - }, - "input":{"shape":"ListOrdersRequest"}, - "output":{"shape":"ListOrdersResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Lists orders. Add filters to your request to return a more specific list of results. Use filters to match the Amazon Resource Name (ARN) of the network site or the status of the order.

        If you specify multiple filters, filters are joined with an OR, and the request returns results that match all of the specified filters.

        " - }, - "ListTagsForResource":{ - "name":"ListTagsForResource", - "http":{ - "method":"GET", - "requestUri":"/tags/{resourceArn}", - "responseCode":200 - }, - "input":{"shape":"ListTagsForResourceRequest"}, - "output":{"shape":"ListTagsForResourceResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

        Lists the tags for the specified resource.

        " - }, - "Ping":{ - "name":"Ping", - "http":{ - "method":"GET", - "requestUri":"/ping", - "responseCode":200 - }, - "output":{"shape":"PingResponse"}, - "errors":[ - {"shape":"InternalServerException"} - ], - "documentation":"

        Checks the health of the service.

        " - }, - "StartNetworkResourceUpdate":{ - "name":"StartNetworkResourceUpdate", - "http":{ - "method":"POST", - "requestUri":"/v1/network-resources/update", - "responseCode":200 - }, - "input":{"shape":"StartNetworkResourceUpdateRequest"}, - "output":{"shape":"StartNetworkResourceUpdateResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Use this action to do the following tasks:

        • Update the duration and renewal status of the commitment period for a radio unit. The update goes into effect immediately.

        • Request a replacement for a network resource.

        • Request that you return a network resource.

        After you submit a request to replace or return a network resource, the status of the network resource changes to CREATING_SHIPPING_LABEL. The shipping label is available when the status of the network resource is PENDING_RETURN. After the network resource is successfully returned, its status changes to DELETED. For more information, see Return a radio unit.

        ", - "idempotent":true - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/tags/{resourceArn}", - "responseCode":200 - }, - "input":{"shape":"TagResourceRequest"}, - "output":{"shape":"TagResourceResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

        Adds tags to the specified resource.

        " - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/tags/{resourceArn}", - "responseCode":200 - }, - "input":{"shape":"UntagResourceRequest"}, - "output":{"shape":"UntagResourceResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"}, - {"shape":"ThrottlingException"} - ], - "documentation":"

        Removes tags from the specified resource.

        " - }, - "UpdateNetworkSite":{ - "name":"UpdateNetworkSite", - "http":{ - "method":"PUT", - "requestUri":"/v1/network-sites/site", - "responseCode":200 - }, - "input":{"shape":"UpdateNetworkSiteRequest"}, - "output":{"shape":"UpdateNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Updates the specified network site.

        ", - "idempotent":true - }, - "UpdateNetworkSitePlan":{ - "name":"UpdateNetworkSitePlan", - "http":{ - "method":"PUT", - "requestUri":"/v1/network-sites/plan", - "responseCode":200 - }, - "input":{"shape":"UpdateNetworkSitePlanRequest"}, - "output":{"shape":"UpdateNetworkSiteResponse"}, - "errors":[ - {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"InternalServerException"} - ], - "documentation":"

        Updates the specified network site plan.

        ", - "idempotent":true - } - }, - "shapes":{ - "AccessDeniedException":{ - "type":"structure", - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

        You do not have permission to perform this operation.

        ", - "error":{ - "httpStatusCode":403, - "senderFault":true - }, - "exception":true - }, - "AcknowledgeOrderReceiptRequest":{ - "type":"structure", - "required":["orderArn"], - "members":{ - "orderArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the order.

        " - } - } - }, - "AcknowledgeOrderReceiptResponse":{ - "type":"structure", - "required":["order"], - "members":{ - "order":{ - "shape":"Order", - "documentation":"

        Information about the order.

        " - } - } - }, - "AcknowledgmentStatus":{ - "type":"string", - "enum":[ - "ACKNOWLEDGING", - "ACKNOWLEDGED", - "UNACKNOWLEDGED" - ] - }, - "ActivateDeviceIdentifierRequest":{ - "type":"structure", - "required":["deviceIdentifierArn"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "deviceIdentifierArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the device identifier.

        " - } - } - }, - "ActivateDeviceIdentifierResponse":{ - "type":"structure", - "required":["deviceIdentifier"], - "members":{ - "deviceIdentifier":{ - "shape":"DeviceIdentifier", - "documentation":"

        Information about the device identifier.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The tags on the device identifier.

        " - } - } - }, - "ActivateNetworkSiteRequest":{ - "type":"structure", - "required":[ - "networkSiteArn", - "shippingAddress" - ], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "commitmentConfiguration":{ - "shape":"CommitmentConfiguration", - "documentation":"

        Determines the duration and renewal status of the commitment period for all pending radio units.

        If you include commitmentConfiguration in the ActivateNetworkSiteRequest action, you must specify the following:

        • The commitment period for the radio unit. You can choose a 60-day, 1-year, or 3-year period.

        • Whether you want your commitment period to automatically renew for one more year after your current commitment period expires.

        For pricing, see Amazon Web Services Private 5G Pricing.

        If you do not include commitmentConfiguration in the ActivateNetworkSiteRequest action, the commitment period is set to 60-days.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        " - }, - "shippingAddress":{ - "shape":"Address", - "documentation":"

        The shipping address of the network site.

        " - } - } - }, - "ActivateNetworkSiteResponse":{ - "type":"structure", - "members":{ - "networkSite":{ - "shape":"NetworkSite", - "documentation":"

        Information about the network site.

        " - } - } - }, - "Address":{ - "type":"structure", - "required":[ - "city", - "country", - "name", - "postalCode", - "stateOrProvince", - "street1" - ], - "members":{ - "city":{ - "shape":"AddressContent", - "documentation":"

        The city for this address.

        " - }, - "company":{ - "shape":"AddressContent", - "documentation":"

        The company name for this address.

        " - }, - "country":{ - "shape":"AddressContent", - "documentation":"

        The country for this address.

        " - }, - "emailAddress":{ - "shape":"AddressContent", - "documentation":"

        The recipient's email address.

        " - }, - "name":{ - "shape":"AddressContent", - "documentation":"

        The recipient's name for this address.

        " - }, - "phoneNumber":{ - "shape":"AddressContent", - "documentation":"

        The recipient's phone number.

        " - }, - "postalCode":{ - "shape":"AddressContent", - "documentation":"

        The postal code for this address.

        " - }, - "stateOrProvince":{ - "shape":"AddressContent", - "documentation":"

        The state or province for this address.

        " - }, - "street1":{ - "shape":"AddressContent", - "documentation":"

        The first line of the street address.

        " - }, - "street2":{ - "shape":"AddressContent", - "documentation":"

        The second line of the street address.

        " - }, - "street3":{ - "shape":"AddressContent", - "documentation":"

        The third line of the street address.

        " - } - }, - "documentation":"

        Information about an address.

        " - }, - "AddressContent":{ - "type":"string", - "max":1024, - "min":1, - "sensitive":true - }, - "Arn":{ - "type":"string", - "pattern":"^arn:aws:private-networks:[a-z0-9-]+:[^:]*:.*$" - }, - "Boolean":{ - "type":"boolean", - "box":true - }, - "ClientToken":{ - "type":"string", - "max":100, - "min":1 - }, - "CommitmentConfiguration":{ - "type":"structure", - "required":[ - "automaticRenewal", - "commitmentLength" - ], - "members":{ - "automaticRenewal":{ - "shape":"Boolean", - "documentation":"

        Determines whether the commitment period for a radio unit is set to automatically renew for an additional 1 year after your current commitment period expires.

        Set to True, if you want your commitment period to automatically renew. Set to False if you do not want your commitment to automatically renew.

        You can do the following:

        • Set a 1-year commitment to automatically renew for an additional 1 year. The hourly rate for the additional year will continue to be the same as your existing 1-year rate.

        • Set a 3-year commitment to automatically renew for an additional 1 year. The hourly rate for the additional year will continue to be the same as your existing 3-year rate.

        • Turn off a previously-enabled automatic renewal on a 1-year or 3-year commitment.

        You cannot use the automatic-renewal option for a 60-day commitment.

        " - }, - "commitmentLength":{ - "shape":"CommitmentLength", - "documentation":"

        The duration of the commitment period for the radio unit. You can choose a 60-day, 1-year, or 3-year period.

        " - } - }, - "documentation":"

        Determines the duration and renewal status of the commitment period for a radio unit.

        For pricing, see Amazon Web Services Private 5G Pricing.

        " - }, - "CommitmentInformation":{ - "type":"structure", - "required":["commitmentConfiguration"], - "members":{ - "commitmentConfiguration":{ - "shape":"CommitmentConfiguration", - "documentation":"

        The duration and renewal status of the commitment period for the radio unit.

        " - }, - "expiresOn":{ - "shape":"Timestamp", - "documentation":"

        The date and time that the commitment period ends. If you do not cancel or renew the commitment before the expiration date, you will be billed at the 60-day-commitment rate.

        " - }, - "startAt":{ - "shape":"Timestamp", - "documentation":"

        The date and time that the commitment period started.

        " - } - }, - "documentation":"

        Shows the duration, the date and time that the contract started and ends, and the renewal status of the commitment period for the radio unit.

        " - }, - "CommitmentLength":{ - "type":"string", - "enum":[ - "SIXTY_DAYS", - "ONE_YEAR", - "THREE_YEARS" - ] - }, - "ConfigureAccessPointRequest":{ - "type":"structure", - "required":["accessPointArn"], - "members":{ - "accessPointArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network resource.

        " - }, - "cpiSecretKey":{ - "shape":"ConfigureAccessPointRequestCpiSecretKeyString", - "documentation":"

        A Base64 encoded string of the CPI certificate associated with the CPI user who is certifying the coordinates of the network resource.

        " - }, - "cpiUserId":{ - "shape":"ConfigureAccessPointRequestCpiUserIdString", - "documentation":"

        The CPI user ID of the CPI user who is certifying the coordinates of the network resource.

        " - }, - "cpiUserPassword":{ - "shape":"ConfigureAccessPointRequestCpiUserPasswordString", - "documentation":"

        The CPI password associated with the CPI certificate in cpiSecretKey.

        " - }, - "cpiUsername":{ - "shape":"ConfigureAccessPointRequestCpiUsernameString", - "documentation":"

        The CPI user name of the CPI user who is certifying the coordinates of the radio unit.

        " - }, - "position":{ - "shape":"Position", - "documentation":"

        The position of the network resource.

        " - } - } - }, - "ConfigureAccessPointRequestCpiSecretKeyString":{ - "type":"string", - "max":100000, - "min":1, - "sensitive":true - }, - "ConfigureAccessPointRequestCpiUserIdString":{ - "type":"string", - "max":4096, - "min":1, - "sensitive":true - }, - "ConfigureAccessPointRequestCpiUserPasswordString":{ - "type":"string", - "max":4096, - "min":1, - "sensitive":true - }, - "ConfigureAccessPointRequestCpiUsernameString":{ - "type":"string", - "max":4096, - "min":1, - "sensitive":true - }, - "ConfigureAccessPointResponse":{ - "type":"structure", - "required":["accessPoint"], - "members":{ - "accessPoint":{ - "shape":"NetworkResource", - "documentation":"

        Information about the network resource.

        " - } - } - }, - "CreateNetworkRequest":{ - "type":"structure", - "required":["networkName"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description of the network.

        " - }, - "networkName":{ - "shape":"Name", - "documentation":"

        The name of the network. You can't change the name after you create the network.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The tags to apply to the network.

        " - } - } - }, - "CreateNetworkResponse":{ - "type":"structure", - "required":["network"], - "members":{ - "network":{ - "shape":"Network", - "documentation":"

        Information about the network.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network tags.

        " - } - } - }, - "CreateNetworkSiteRequest":{ - "type":"structure", - "required":[ - "networkArn", - "networkSiteName" - ], - "members":{ - "availabilityZone":{ - "shape":"String", - "documentation":"

        The Availability Zone that is the parent of this site. You can't change the Availability Zone after you create the site.

        " - }, - "availabilityZoneId":{ - "shape":"String", - "documentation":"

        The ID of the Availability Zone that is the parent of this site. You can't change the Availability Zone after you create the site.

        " - }, - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description of the site.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "networkSiteName":{ - "shape":"Name", - "documentation":"

        The name of the site. You can't change the name after you create the site.

        " - }, - "pendingPlan":{ - "shape":"SitePlan", - "documentation":"

        Information about the pending plan for this site.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The tags to apply to the network site.

        " - } - } - }, - "CreateNetworkSiteResponse":{ - "type":"structure", - "members":{ - "networkSite":{ - "shape":"NetworkSite", - "documentation":"

        Information about the network site.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network site tags.

        " - } - } - }, - "DeactivateDeviceIdentifierRequest":{ - "type":"structure", - "required":["deviceIdentifierArn"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "deviceIdentifierArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the device identifier.

        " - } - } - }, - "DeactivateDeviceIdentifierResponse":{ - "type":"structure", - "required":["deviceIdentifier"], - "members":{ - "deviceIdentifier":{ - "shape":"DeviceIdentifier", - "documentation":"

        Information about the device identifier.

        " - } - } - }, - "DeleteNetworkRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        ", - "location":"querystring", - "locationName":"clientToken" - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        ", - "location":"uri", - "locationName":"networkArn" - } - } - }, - "DeleteNetworkResponse":{ - "type":"structure", - "required":["network"], - "members":{ - "network":{ - "shape":"Network", - "documentation":"

        Information about the network.

        " - } - } - }, - "DeleteNetworkSiteRequest":{ - "type":"structure", - "required":["networkSiteArn"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        ", - "location":"querystring", - "locationName":"clientToken" - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        ", - "location":"uri", - "locationName":"networkSiteArn" - } - } - }, - "DeleteNetworkSiteResponse":{ - "type":"structure", - "members":{ - "networkSite":{ - "shape":"NetworkSite", - "documentation":"

        Information about the network site.

        " - } - } - }, - "Description":{ - "type":"string", - "max":100, - "min":0 - }, - "DeviceIdentifier":{ - "type":"structure", - "members":{ - "createdAt":{ - "shape":"Timestamp", - "documentation":"

        The creation time of this device identifier.

        " - }, - "deviceIdentifierArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the device identifier.

        " - }, - "iccid":{ - "shape":"String", - "documentation":"

        The Integrated Circuit Card Identifier of the device identifier.

        " - }, - "imsi":{ - "shape":"DeviceIdentifierImsiString", - "documentation":"

        The International Mobile Subscriber Identity of the device identifier.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network on which the device identifier appears.

        " - }, - "orderArn":{ - "shape":"String", - "documentation":"

        The Amazon Resource Name (ARN) of the order used to purchase the device identifier.

        " - }, - "status":{ - "shape":"DeviceIdentifierStatus", - "documentation":"

        The status of the device identifier.

        " - }, - "trafficGroupArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the traffic group to which the device identifier belongs.

        " - }, - "vendor":{ - "shape":"String", - "documentation":"

        The vendor of the device identifier.

        " - } - }, - "documentation":"

        Information about a subscriber of a device that can use a network.

        " - }, - "DeviceIdentifierFilterKeys":{ - "type":"string", - "enum":[ - "STATUS", - "ORDER", - "TRAFFIC_GROUP" - ] - }, - "DeviceIdentifierFilterValues":{ - "type":"list", - "member":{"shape":"String"} - }, - "DeviceIdentifierFilters":{ - "type":"map", - "key":{"shape":"DeviceIdentifierFilterKeys"}, - "value":{"shape":"DeviceIdentifierFilterValues"} - }, - "DeviceIdentifierImsiString":{ - "type":"string", - "pattern":"^[0-9]{15}$", - "sensitive":true - }, - "DeviceIdentifierList":{ - "type":"list", - "member":{"shape":"DeviceIdentifier"} - }, - "DeviceIdentifierStatus":{ - "type":"string", - "enum":[ - "ACTIVE", - "INACTIVE" - ] - }, - "Double":{ - "type":"double", - "box":true - }, - "ElevationReference":{ - "type":"string", - "enum":[ - "AGL", - "AMSL" - ] - }, - "ElevationUnit":{ - "type":"string", - "enum":["FEET"] - }, - "GetDeviceIdentifierRequest":{ - "type":"structure", - "required":["deviceIdentifierArn"], - "members":{ - "deviceIdentifierArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the device identifier.

        ", - "location":"uri", - "locationName":"deviceIdentifierArn" - } - } - }, - "GetDeviceIdentifierResponse":{ - "type":"structure", - "members":{ - "deviceIdentifier":{ - "shape":"DeviceIdentifier", - "documentation":"

        Information about the device identifier.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The device identifier tags.

        " - } - } - }, - "GetNetworkRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        ", - "location":"uri", - "locationName":"networkArn" - } - } - }, - "GetNetworkResourceRequest":{ - "type":"structure", - "required":["networkResourceArn"], - "members":{ - "networkResourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network resource.

        ", - "location":"uri", - "locationName":"networkResourceArn" - } - } - }, - "GetNetworkResourceResponse":{ - "type":"structure", - "required":["networkResource"], - "members":{ - "networkResource":{ - "shape":"NetworkResource", - "documentation":"

        Information about the network resource.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network resource tags.

        " - } - } - }, - "GetNetworkResponse":{ - "type":"structure", - "required":["network"], - "members":{ - "network":{ - "shape":"Network", - "documentation":"

        Information about the network.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network tags.

        " - } - } - }, - "GetNetworkSiteRequest":{ - "type":"structure", - "required":["networkSiteArn"], - "members":{ - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        ", - "location":"uri", - "locationName":"networkSiteArn" - } - } - }, - "GetNetworkSiteResponse":{ - "type":"structure", - "members":{ - "networkSite":{ - "shape":"NetworkSite", - "documentation":"

        Information about the network site.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network site tags.

        " - } - } - }, - "GetOrderRequest":{ - "type":"structure", - "required":["orderArn"], - "members":{ - "orderArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the order.

        ", - "location":"uri", - "locationName":"orderArn" - } - } - }, - "GetOrderResponse":{ - "type":"structure", - "required":["order"], - "members":{ - "order":{ - "shape":"Order", - "documentation":"

        Information about the order.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The order tags.

        " - } - } - }, - "HealthStatus":{ - "type":"string", - "enum":[ - "INITIAL", - "HEALTHY", - "UNHEALTHY" - ] - }, - "Integer":{ - "type":"integer", - "box":true - }, - "InternalServerException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{ - "shape":"String", - "documentation":"

        Description of the error.

        " - }, - "retryAfterSeconds":{ - "shape":"Integer", - "documentation":"

        Advice to clients on when the call can be safely retried.

        ", - "location":"header", - "locationName":"Retry-After" - } - }, - "documentation":"

        Information about an internal error.

        ", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true, - "retryable":{"throttling":false} - }, - "LimitExceededException":{ - "type":"structure", - "required":["message"], - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

        The limit was exceeded.

        ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ListDeviceIdentifiersRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "filters":{ - "shape":"DeviceIdentifierFilters", - "documentation":"

        The filters.

        • ORDER - The Amazon Resource Name (ARN) of the order.

        • STATUS - The status (ACTIVE | INACTIVE).

        • TRAFFIC_GROUP - The Amazon Resource Name (ARN) of the traffic group.

        Filter values are case sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.

        " - }, - "maxResults":{ - "shape":"ListDeviceIdentifiersRequestMaxResultsInteger", - "documentation":"

        The maximum number of results to return.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "startToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListDeviceIdentifiersRequestMaxResultsInteger":{ - "type":"integer", - "box":true, - "max":20, - "min":1 - }, - "ListDeviceIdentifiersResponse":{ - "type":"structure", - "members":{ - "deviceIdentifiers":{ - "shape":"DeviceIdentifierList", - "documentation":"

        Information about the device identifiers.

        " - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworkResourcesRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "filters":{ - "shape":"NetworkResourceFilters", - "documentation":"

        The filters.

        • ORDER - The Amazon Resource Name (ARN) of the order.

        • STATUS - The status (AVAILABLE | DELETED | DELETING | PENDING | PENDING_RETURN | PROVISIONING | SHIPPED).

        Filter values are case sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.

        " - }, - "maxResults":{ - "shape":"ListNetworkResourcesRequestMaxResultsInteger", - "documentation":"

        The maximum number of results to return.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "startToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworkResourcesRequestMaxResultsInteger":{ - "type":"integer", - "box":true, - "max":20, - "min":1 - }, - "ListNetworkResourcesResponse":{ - "type":"structure", - "members":{ - "networkResources":{ - "shape":"NetworkResourceList", - "documentation":"

        Information about network resources.

        " - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworkSitesRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "filters":{ - "shape":"NetworkSiteFilters", - "documentation":"

        The filters. Add filters to your request to return a more specific list of results. Use filters to match the status of the network sites.

        • STATUS - The status (AVAILABLE | CREATED | DELETED | DEPROVISIONING | PROVISIONING).

        Filter values are case sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.

        " - }, - "maxResults":{ - "shape":"ListNetworkSitesRequestMaxResultsInteger", - "documentation":"

        The maximum number of results to return.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "startToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworkSitesRequestMaxResultsInteger":{ - "type":"integer", - "box":true, - "max":20, - "min":1 - }, - "ListNetworkSitesResponse":{ - "type":"structure", - "members":{ - "networkSites":{ - "shape":"NetworkSiteList", - "documentation":"

        Information about the network sites.

        " - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworksRequest":{ - "type":"structure", - "members":{ - "filters":{ - "shape":"NetworkFilters", - "documentation":"

        The filters.

        • STATUS - The status (AVAILABLE | CREATED | DELETED | DEPROVISIONING | PROVISIONING).

        Filter values are case sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.

        " - }, - "maxResults":{ - "shape":"ListNetworksRequestMaxResultsInteger", - "documentation":"

        The maximum number of results to return.

        " - }, - "startToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListNetworksRequestMaxResultsInteger":{ - "type":"integer", - "box":true, - "max":20, - "min":1 - }, - "ListNetworksResponse":{ - "type":"structure", - "members":{ - "networks":{ - "shape":"NetworkList", - "documentation":"

        The networks.

        " - }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListOrdersRequest":{ - "type":"structure", - "required":["networkArn"], - "members":{ - "filters":{ - "shape":"OrderFilters", - "documentation":"

        The filters.

        • NETWORK_SITE - The Amazon Resource Name (ARN) of the network site.

        • STATUS - The status (ACKNOWLEDGING | ACKNOWLEDGED | UNACKNOWLEDGED).

        Filter values are case sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.

        " - }, - "maxResults":{ - "shape":"ListOrdersRequestMaxResultsInteger", - "documentation":"

        The maximum number of results to return.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "startToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - } - } - }, - "ListOrdersRequestMaxResultsInteger":{ - "type":"integer", - "box":true, - "max":20, - "min":1 - }, - "ListOrdersResponse":{ - "type":"structure", - "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

        The token for the next page of results.

        " - }, - "orders":{ - "shape":"OrderList", - "documentation":"

        Information about the orders.

        " - } - } - }, - "ListTagsForResourceRequest":{ - "type":"structure", - "required":["resourceArn"], - "members":{ - "resourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the resource.

        ", - "location":"uri", - "locationName":"resourceArn" - } - } - }, - "ListTagsForResourceResponse":{ - "type":"structure", - "members":{ - "tags":{ - "shape":"TagMap", - "documentation":"

        The resource tags.

        " - } - } - }, - "Name":{ - "type":"string", - "max":64, - "min":1, - "pattern":"^[0-9a-zA-Z-]*$" - }, - "NameValuePair":{ - "type":"structure", - "required":["name"], - "members":{ - "name":{ - "shape":"String", - "documentation":"

        The name of the pair.

        " - }, - "value":{ - "shape":"String", - "documentation":"

        The value of the pair.

        " - } - }, - "documentation":"

        Information about a name/value pair.

        " - }, - "NameValuePairs":{ - "type":"list", - "member":{"shape":"NameValuePair"} - }, - "Network":{ - "type":"structure", - "required":[ - "networkArn", - "networkName", - "status" - ], - "members":{ - "createdAt":{ - "shape":"Timestamp", - "documentation":"

        The creation time of the network.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description of the network.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network.

        " - }, - "networkName":{ - "shape":"Name", - "documentation":"

        The name of the network.

        " - }, - "status":{ - "shape":"NetworkStatus", - "documentation":"

        The status of the network.

        " - }, - "statusReason":{ - "shape":"String", - "documentation":"

        The status reason of the network.

        " - } - }, - "documentation":"

        Information about a network.

        " - }, - "NetworkFilterKeys":{ - "type":"string", - "enum":["STATUS"] - }, - "NetworkFilterValues":{ - "type":"list", - "member":{"shape":"String"} - }, - "NetworkFilters":{ - "type":"map", - "key":{"shape":"NetworkFilterKeys"}, - "value":{"shape":"NetworkFilterValues"} - }, - "NetworkList":{ - "type":"list", - "member":{"shape":"Network"} - }, - "NetworkResource":{ - "type":"structure", - "members":{ - "attributes":{ - "shape":"NameValuePairs", - "documentation":"

        The attributes of the network resource.

        " - }, - "commitmentInformation":{ - "shape":"CommitmentInformation", - "documentation":"

        Information about the commitment period for the radio unit. Shows the duration, the date and time that the contract started and ends, and the renewal status of the commitment period.

        " - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

        The creation time of the network resource.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description of the network resource.

        " - }, - "health":{ - "shape":"HealthStatus", - "documentation":"

        The health of the network resource.

        " - }, - "model":{ - "shape":"String", - "documentation":"

        The model of the network resource.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network on which this network resource appears.

        " - }, - "networkResourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network resource.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site on which this network resource appears.

        " - }, - "orderArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the order used to purchase this network resource.

        " - }, - "position":{ - "shape":"Position", - "documentation":"

        The position of the network resource.

        " - }, - "returnInformation":{ - "shape":"ReturnInformation", - "documentation":"

        Information about a request to return the network resource.

        " - }, - "serialNumber":{ - "shape":"String", - "documentation":"

        The serial number of the network resource.

        " - }, - "status":{ - "shape":"NetworkResourceStatus", - "documentation":"

        The status of the network resource.

        " - }, - "statusReason":{ - "shape":"String", - "documentation":"

        The status reason of the network resource.

        " - }, - "type":{ - "shape":"NetworkResourceType", - "documentation":"

        The type of the network resource.

        " - }, - "vendor":{ - "shape":"String", - "documentation":"

        The vendor of the network resource.

        " - } - }, - "documentation":"

        Information about a network resource.

        " - }, - "NetworkResourceDefinition":{ - "type":"structure", - "required":[ - "count", - "type" - ], - "members":{ - "count":{ - "shape":"NetworkResourceDefinitionCountInteger", - "documentation":"

        The count in the network resource definition.

        " - }, - "options":{ - "shape":"Options", - "documentation":"

        The options in the network resource definition.

        " - }, - "type":{ - "shape":"NetworkResourceDefinitionType", - "documentation":"

        The type in the network resource definition.

        " - } - }, - "documentation":"

        Information about a network resource definition.

        " - }, - "NetworkResourceDefinitionCountInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "NetworkResourceDefinitionType":{ - "type":"string", - "enum":[ - "RADIO_UNIT", - "DEVICE_IDENTIFIER" - ] - }, - "NetworkResourceDefinitions":{ - "type":"list", - "member":{"shape":"NetworkResourceDefinition"} - }, - "NetworkResourceFilterKeys":{ - "type":"string", - "enum":[ - "ORDER", - "STATUS" - ] - }, - "NetworkResourceFilterValues":{ - "type":"list", - "member":{"shape":"String"} - }, - "NetworkResourceFilters":{ - "type":"map", - "key":{"shape":"NetworkResourceFilterKeys"}, - "value":{"shape":"NetworkResourceFilterValues"} - }, - "NetworkResourceList":{ - "type":"list", - "member":{"shape":"NetworkResource"} - }, - "NetworkResourceStatus":{ - "type":"string", - "enum":[ - "PENDING", - "SHIPPED", - "PROVISIONING", - "PROVISIONED", - "AVAILABLE", - "DELETING", - "PENDING_RETURN", - "DELETED", - "CREATING_SHIPPING_LABEL" - ] - }, - "NetworkResourceType":{ - "type":"string", - "enum":["RADIO_UNIT"] - }, - "NetworkSite":{ - "type":"structure", - "required":[ - "networkArn", - "networkSiteArn", - "networkSiteName", - "status" - ], - "members":{ - "availabilityZone":{ - "shape":"String", - "documentation":"

        The parent Availability Zone for the network site.

        " - }, - "availabilityZoneId":{ - "shape":"String", - "documentation":"

        The parent Availability Zone ID for the network site.

        " - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

        The creation time of the network site.

        " - }, - "currentPlan":{ - "shape":"SitePlan", - "documentation":"

        The current plan of the network site.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description of the network site.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network to which the network site belongs.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        " - }, - "networkSiteName":{ - "shape":"Name", - "documentation":"

        The name of the network site.

        " - }, - "pendingPlan":{ - "shape":"SitePlan", - "documentation":"

        The pending plan of the network site.

        " - }, - "status":{ - "shape":"NetworkSiteStatus", - "documentation":"

        The status of the network site.

        " - }, - "statusReason":{ - "shape":"String", - "documentation":"

        The status reason of the network site.

        " - } - }, - "documentation":"

        Information about a network site.

        " - }, - "NetworkSiteFilterKeys":{ - "type":"string", - "enum":["STATUS"] - }, - "NetworkSiteFilterValues":{ - "type":"list", - "member":{"shape":"String"} - }, - "NetworkSiteFilters":{ - "type":"map", - "key":{"shape":"NetworkSiteFilterKeys"}, - "value":{"shape":"NetworkSiteFilterValues"} - }, - "NetworkSiteList":{ - "type":"list", - "member":{"shape":"NetworkSite"} - }, - "NetworkSiteStatus":{ - "type":"string", - "enum":[ - "CREATED", - "PROVISIONING", - "AVAILABLE", - "DEPROVISIONING", - "DELETED" - ] - }, - "NetworkStatus":{ - "type":"string", - "enum":[ - "CREATED", - "PROVISIONING", - "AVAILABLE", - "DEPROVISIONING", - "DELETED" - ] - }, - "Options":{ - "type":"list", - "member":{"shape":"NameValuePair"} - }, - "Order":{ - "type":"structure", - "members":{ - "acknowledgmentStatus":{ - "shape":"AcknowledgmentStatus", - "documentation":"

        The acknowledgement status of the order.

        " - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

        The creation time of the order.

        " - }, - "networkArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network associated with this order.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site associated with this order.

        " - }, - "orderArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the order.

        " - }, - "orderedResources":{ - "shape":"OrderedResourceDefinitions", - "documentation":"

        A list of the network resources placed in the order.

        " - }, - "shippingAddress":{ - "shape":"Address", - "documentation":"

        The shipping address of the order.

        " - }, - "trackingInformation":{ - "shape":"TrackingInformationList", - "documentation":"

        The tracking information of the order.

        " - } - }, - "documentation":"

        Information about an order.

        " - }, - "OrderFilterKeys":{ - "type":"string", - "enum":[ - "STATUS", - "NETWORK_SITE" - ] - }, - "OrderFilterValues":{ - "type":"list", - "member":{"shape":"String"} - }, - "OrderFilters":{ - "type":"map", - "key":{"shape":"OrderFilterKeys"}, - "value":{"shape":"OrderFilterValues"} - }, - "OrderList":{ - "type":"list", - "member":{"shape":"Order"} - }, - "OrderedResourceDefinition":{ - "type":"structure", - "required":[ - "count", - "type" - ], - "members":{ - "commitmentConfiguration":{ - "shape":"CommitmentConfiguration", - "documentation":"

        The duration and renewal status of the commitment period for each radio unit in the order. Does not show details if the resource type is DEVICE_IDENTIFIER.

        " - }, - "count":{ - "shape":"OrderedResourceDefinitionCountInteger", - "documentation":"

        The number of network resources in the order.

        " - }, - "type":{ - "shape":"NetworkResourceDefinitionType", - "documentation":"

        The type of network resource in the order.

        " - } - }, - "documentation":"

        Details of the network resources in the order.

        " - }, - "OrderedResourceDefinitionCountInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "OrderedResourceDefinitions":{ - "type":"list", - "member":{"shape":"OrderedResourceDefinition"} - }, - "PaginationToken":{"type":"string"}, - "PingResponse":{ - "type":"structure", - "members":{ - "status":{ - "shape":"String", - "documentation":"

        Information about the health of the service.

        " - } - } - }, - "Position":{ - "type":"structure", - "members":{ - "elevation":{ - "shape":"Double", - "documentation":"

        The elevation of the equipment at this position.

        " - }, - "elevationReference":{ - "shape":"ElevationReference", - "documentation":"

        The reference point from which elevation is reported.

        " - }, - "elevationUnit":{ - "shape":"ElevationUnit", - "documentation":"

        The units used to measure the elevation of the position.

        " - }, - "latitude":{ - "shape":"Double", - "documentation":"

        The latitude of the position.

        " - }, - "longitude":{ - "shape":"Double", - "documentation":"

        The longitude of the position.

        " - } - }, - "documentation":"

        Information about a position.

        " - }, - "ResourceNotFoundException":{ - "type":"structure", - "required":[ - "message", - "resourceId", - "resourceType" - ], - "members":{ - "message":{ - "shape":"String", - "documentation":"

        Description of the error.

        " - }, - "resourceId":{ - "shape":"String", - "documentation":"

        Identifier of the affected resource.

        " - }, - "resourceType":{ - "shape":"String", - "documentation":"

        Type of the affected resource.

        " - } - }, - "documentation":"

        The resource was not found.

        ", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ReturnInformation":{ - "type":"structure", - "members":{ - "replacementOrderArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the replacement order.

        " - }, - "returnReason":{ - "shape":"String", - "documentation":"

        The reason for the return. If the return request did not include a reason for the return, this value is null.

        " - }, - "shippingAddress":{ - "shape":"Address", - "documentation":"

        The shipping address.

        " - }, - "shippingLabel":{ - "shape":"String", - "documentation":"

        The URL of the shipping label. The shipping label is available for download only if the status of the network resource is PENDING_RETURN. For more information, see Return a radio unit.

        " - } - }, - "documentation":"

        Information about a request to return a network resource.

        " - }, - "SitePlan":{ - "type":"structure", - "members":{ - "options":{ - "shape":"Options", - "documentation":"

        The options of the plan.

        " - }, - "resourceDefinitions":{ - "shape":"NetworkResourceDefinitions", - "documentation":"

        The resource definitions of the plan.

        " - } - }, - "documentation":"

        Information about a site plan.

        " - }, - "StartNetworkResourceUpdateRequest":{ - "type":"structure", - "required":[ - "networkResourceArn", - "updateType" - ], - "members":{ - "commitmentConfiguration":{ - "shape":"CommitmentConfiguration", - "documentation":"

        Use this action to extend and automatically renew the commitment period for the radio unit. You can do the following:

        • Change a 60-day commitment to a 1-year or 3-year commitment. The change is immediate and the hourly rate decreases to the rate for the new commitment period.

        • Change a 1-year commitment to a 3-year commitment. The change is immediate and the hourly rate decreases to the rate for the 3-year commitment period.

        • Set a 1-year commitment to automatically renew for an additional 1 year. The hourly rate for the additional year will continue to be the same as your existing 1-year rate.

        • Set a 3-year commitment to automatically renew for an additional 1 year. The hourly rate for the additional year will continue to be the same as your existing 3-year rate.

        • Turn off a previously-enabled automatic renewal on a 1-year or 3-year commitment. You cannot use the automatic-renewal option for a 60-day commitment.

        For pricing, see Amazon Web Services Private 5G Pricing.

        " - }, - "networkResourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network resource.

        " - }, - "returnReason":{ - "shape":"StartNetworkResourceUpdateRequestReturnReasonString", - "documentation":"

        The reason for the return. Providing a reason for a return is optional.

        " - }, - "shippingAddress":{ - "shape":"Address", - "documentation":"

        The shipping address. If you don't provide a shipping address when replacing or returning a network resource, we use the address from the original order for the network resource.

        " - }, - "updateType":{ - "shape":"UpdateType", - "documentation":"

        The update type.

        • REPLACE - Submits a request to replace a defective radio unit. We provide a shipping label that you can use for the return process and we ship a replacement radio unit to you.

        • RETURN - Submits a request to return a radio unit that you no longer need. We provide a shipping label that you can use for the return process.

        • COMMITMENT - Submits a request to change or renew the commitment period. If you choose this value, then you must set commitmentConfiguration .

        " - } - } - }, - "StartNetworkResourceUpdateRequestReturnReasonString":{ - "type":"string", - "max":1000, - "min":0 - }, - "StartNetworkResourceUpdateResponse":{ - "type":"structure", - "members":{ - "networkResource":{ - "shape":"NetworkResource", - "documentation":"

        The network resource.

        " - } - } - }, - "String":{"type":"string"}, - "TagKey":{ - "type":"string", - "max":128, - "min":1, - "pattern":"^(?!aws:)[^\\x00-\\x1f\\x22]+$" - }, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"}, - "max":50, - "min":1, - "sensitive":true - }, - "TagMap":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"}, - "max":50, - "min":1, - "sensitive":true - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "resourceArn", - "tags" - ], - "members":{ - "resourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the resource.

        ", - "location":"uri", - "locationName":"resourceArn" - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The tags to add to the resource.

        " - } - } - }, - "TagResourceResponse":{ - "type":"structure", - "members":{ - } - }, - "TagValue":{ - "type":"string", - "max":256, - "min":0, - "pattern":"^[^\\x00-\\x1f\\x22]*$" - }, - "ThrottlingException":{ - "type":"structure", - "members":{ - "message":{"shape":"String"} - }, - "documentation":"

        The request was denied due to request throttling.

        ", - "error":{ - "httpStatusCode":429, - "senderFault":true - }, - "exception":true, - "retryable":{"throttling":true} - }, - "Timestamp":{ - "type":"timestamp", - "timestampFormat":"iso8601" - }, - "TrackingInformation":{ - "type":"structure", - "members":{ - "trackingNumber":{ - "shape":"String", - "documentation":"

        The tracking number of the shipment.

        " - } - }, - "documentation":"

        Information about tracking a shipment.

        " - }, - "TrackingInformationList":{ - "type":"list", - "member":{"shape":"TrackingInformation"} - }, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "resourceArn", - "tagKeys" - ], - "members":{ - "resourceArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the resource.

        ", - "location":"uri", - "locationName":"resourceArn" - }, - "tagKeys":{ - "shape":"TagKeyList", - "documentation":"

        The tag keys.

        ", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UntagResourceResponse":{ - "type":"structure", - "members":{ - } - }, - "UpdateNetworkSitePlanRequest":{ - "type":"structure", - "required":[ - "networkSiteArn", - "pendingPlan" - ], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        " - }, - "pendingPlan":{ - "shape":"SitePlan", - "documentation":"

        The pending plan.

        " - } - } - }, - "UpdateNetworkSiteRequest":{ - "type":"structure", - "required":["networkSiteArn"], - "members":{ - "clientToken":{ - "shape":"ClientToken", - "documentation":"

        Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

        " - }, - "description":{ - "shape":"Description", - "documentation":"

        The description.

        " - }, - "networkSiteArn":{ - "shape":"Arn", - "documentation":"

        The Amazon Resource Name (ARN) of the network site.

        " - } - } - }, - "UpdateNetworkSiteResponse":{ - "type":"structure", - "members":{ - "networkSite":{ - "shape":"NetworkSite", - "documentation":"

        Information about the network site.

        " - }, - "tags":{ - "shape":"TagMap", - "documentation":"

        The network site tags.

        " - } - } - }, - "UpdateType":{ - "type":"string", - "enum":[ - "REPLACE", - "RETURN", - "COMMITMENT" - ] - }, - "ValidationException":{ - "type":"structure", - "required":[ - "message", - "reason" - ], - "members":{ - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

        The list of fields that caused the error, if applicable.

        " - }, - "message":{ - "shape":"String", - "documentation":"

        Description of the error.

        " - }, - "reason":{ - "shape":"ValidationExceptionReason", - "documentation":"

        Reason the request failed validation.

        " - } - }, - "documentation":"

        The request failed validation.

        ", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ValidationExceptionField":{ - "type":"structure", - "required":[ - "message", - "name" - ], - "members":{ - "message":{ - "shape":"String", - "documentation":"

        The message about the validation failure.

        " - }, - "name":{ - "shape":"String", - "documentation":"

        The field name that failed validation.

        " - } - }, - "documentation":"

        Information about a field that failed validation.

        " - }, - "ValidationExceptionFieldList":{ - "type":"list", - "member":{"shape":"ValidationExceptionField"} - }, - "ValidationExceptionReason":{ - "type":"string", - "enum":[ - "UNKNOWN_OPERATION", - "CANNOT_PARSE", - "CANNOT_ASSUME_ROLE", - "FIELD_VALIDATION_FAILED", - "OTHER" - ] - } - }, - "documentation":"

        Amazon Web Services Private 5G is a managed service that makes it easy to deploy, operate, and scale your own private mobile network at your on-premises location. Private 5G provides the pre-configured hardware and software for mobile networks, helps automate setup, and scales capacity on demand to support additional devices as needed.

        " -} diff --git a/services/proton/pom.xml b/services/proton/pom.xml index b1050b478853..7214d0eeae7f 100644 --- a/services/proton/pom.xml +++ b/services/proton/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT proton AWS Java SDK :: Services :: Proton diff --git a/services/proton/src/main/resources/codegen-resources/customization.config b/services/proton/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/proton/src/main/resources/codegen-resources/customization.config +++ b/services/proton/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/qapps/pom.xml b/services/qapps/pom.xml index 5803f6c2739e..73668ab8f1a5 100644 --- a/services/qapps/pom.xml +++ b/services/qapps/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT qapps AWS Java SDK :: Services :: Q Apps diff --git a/services/qapps/src/main/resources/codegen-resources/customization.config b/services/qapps/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/qapps/src/main/resources/codegen-resources/customization.config +++ b/services/qapps/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/qbusiness/pom.xml b/services/qbusiness/pom.xml index afc8a8c1b58a..32c081b38fd9 100644 --- a/services/qbusiness/pom.xml +++ b/services/qbusiness/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT qbusiness AWS Java SDK :: Services :: Q Business diff --git a/services/qbusiness/src/main/resources/codegen-resources/customization.config b/services/qbusiness/src/main/resources/codegen-resources/customization.config 
index 6424907d3404..f754ccb8740d 100644 --- a/services/qbusiness/src/main/resources/codegen-resources/customization.config +++ b/services/qbusiness/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "enableGenerateCompiledEndpointRules": true, - "usePriorKnowledgeForH2": true, - "enableFastUnmarshaller": true + "usePriorKnowledgeForH2": true } diff --git a/services/qbusiness/src/main/resources/codegen-resources/paginators-1.json b/services/qbusiness/src/main/resources/codegen-resources/paginators-1.json index 5d1f84682682..5b8489e98502 100644 --- a/services/qbusiness/src/main/resources/codegen-resources/paginators-1.json +++ b/services/qbusiness/src/main/resources/codegen-resources/paginators-1.json @@ -18,6 +18,12 @@ "limit_key": "maxResults", "result_key": "attachments" }, + "ListChatResponseConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "chatResponseConfigurations" + }, "ListConversations": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/qbusiness/src/main/resources/codegen-resources/service-2.json b/services/qbusiness/src/main/resources/codegen-resources/service-2.json index 0212d9ca32d0..ae893a55095e 100644 --- a/services/qbusiness/src/main/resources/codegen-resources/service-2.json +++ b/services/qbusiness/src/main/resources/codegen-resources/service-2.json @@ -192,6 +192,27 @@ "documentation":"

        Creates an Amazon Q Business application.

        There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users.

        An Amazon Q Apps service linked role will be created if it's absent in the Amazon Web Services account when QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps.

        When you create an application, Amazon Q Business may securely transmit data for processing from your selected Amazon Web Services region, but within your geography. For more information, see Cross region inference in Amazon Q Business.

        ", "idempotent":true }, + "CreateChatResponseConfiguration":{ + "name":"CreateChatResponseConfiguration", + "http":{ + "method":"POST", + "requestUri":"/applications/{applicationId}/chatresponseconfigurations", + "responseCode":200 + }, + "input":{"shape":"CreateChatResponseConfigurationRequest"}, + "output":{"shape":"CreateChatResponseConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

        Creates a new chat response configuration for an Amazon Q Business application. This operation establishes a set of parameters that define how the system generates and formats responses to user queries in chat interactions.

        ", + "idempotent":true + }, "CreateDataAccessor":{ "name":"CreateDataAccessor", "http":{ @@ -415,6 +436,26 @@ "documentation":"

        Deletes chat controls configured for an existing Amazon Q Business application.

        ", "idempotent":true }, + "DeleteChatResponseConfiguration":{ + "name":"DeleteChatResponseConfiguration", + "http":{ + "method":"DELETE", + "requestUri":"/applications/{applicationId}/chatresponseconfigurations/{chatResponseConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"DeleteChatResponseConfigurationRequest"}, + "output":{"shape":"DeleteChatResponseConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Deletes a specified chat response configuration from an Amazon Q Business application.

        ", + "idempotent":true + }, "DeleteConversation":{ "name":"DeleteConversation", "http":{ @@ -651,6 +692,24 @@ ], "documentation":"

        Gets information about chat controls configured for an existing Amazon Q Business application.

        " }, + "GetChatResponseConfiguration":{ + "name":"GetChatResponseConfiguration", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/chatresponseconfigurations/{chatResponseConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"GetChatResponseConfigurationRequest"}, + "output":{"shape":"GetChatResponseConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Retrieves detailed information about a specific chat response configuration from an Amazon Q Business application. This operation returns the complete configuration settings and metadata.

        " + }, "GetDataAccessor":{ "name":"GetDataAccessor", "http":{ @@ -871,6 +930,24 @@ ], "documentation":"

        Gets a list of attachments associated with an Amazon Q Business web experience or a list of attachements associated with a specific Amazon Q Business conversation.

        " }, + "ListChatResponseConfigurations":{ + "name":"ListChatResponseConfigurations", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/chatresponseconfigurations", + "responseCode":200 + }, + "input":{"shape":"ListChatResponseConfigurationsRequest"}, + "output":{"shape":"ListChatResponseConfigurationsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Retrieves a list of all chat response configurations available in a specified Amazon Q Business application. This operation returns summary information about each configuration to help administrators manage and select appropriate response settings.

        " + }, "ListConversations":{ "name":"ListConversations", "http":{ @@ -1338,6 +1415,26 @@ "documentation":"

        Updates a set of chat controls configured for an existing Amazon Q Business application.

        ", "idempotent":true }, + "UpdateChatResponseConfiguration":{ + "name":"UpdateChatResponseConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/applications/{applicationId}/chatresponseconfigurations/{chatResponseConfigurationId}", + "responseCode":200 + }, + "input":{"shape":"UpdateChatResponseConfigurationRequest"}, + "output":{"shape":"UpdateChatResponseConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

        Updates an existing chat response configuration in an Amazon Q Business application. This operation allows administrators to modify configuration settings, display name, and response parameters to refine how the system generates responses.

        ", + "idempotent":true + }, "UpdateDataAccessor":{ "name":"UpdateDataAccessor", "http":{ @@ -1966,6 +2063,10 @@ "shape":"QIamActions", "documentation":"

        The list of Amazon Q Business actions that the ISV is allowed to perform.

        " }, + "conditions":{ + "shape":"PermissionConditions", + "documentation":"

        The conditions that restrict when the permission is effective. These conditions can be used to limit the permission based on specific attributes of the request.

        " + }, "principal":{ "shape":"PrincipalRoleArn", "documentation":"

        The Amazon Resource Name of the IAM role for the ISV that is being granted permission.

        " @@ -2710,6 +2811,94 @@ "documentation":"

        The streaming output for the Chat API.

        ", "eventstream":true }, + "ChatResponseConfiguration":{ + "type":"structure", + "required":[ + "chatResponseConfigurationId", + "chatResponseConfigurationArn", + "displayName", + "status" + ], + "members":{ + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        A unique identifier for your chat response configuration settings, used to reference and manage the configuration within the Amazon Q Business service.

        " + }, + "chatResponseConfigurationArn":{ + "shape":"ChatResponseConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the chat response configuration, which uniquely identifies the resource across all Amazon Web Services services and accounts.

        " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

        A human-readable name for the chat response configuration, making it easier to identify and manage multiple configurations within an organization.

        " + }, + "responseConfigurationSummary":{ + "shape":"ResponseConfigurationSummary", + "documentation":"

        A summary of the response configuration settings, providing a concise overview of the key parameters that define how responses are generated and formatted.

        " + }, + "status":{ + "shape":"ChatResponseConfigurationStatus", + "documentation":"

        The current status of the chat response configuration, indicating whether it is active, pending, or in another state that affects its availability for use in chat interactions.

        " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp indicating when the chat response configuration was initially created, useful for tracking the lifecycle of configuration resources.

        " + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp indicating when the chat response configuration was last modified, helping administrators track changes and maintain version awareness.

        " + } + }, + "documentation":"

        Configuration details that define how Amazon Q Business generates and formats responses to user queries in chat interactions. This configuration allows administrators to customize response characteristics to meet specific organizational needs and communication standards.

        " + }, + "ChatResponseConfigurationArn":{ + "type":"string", + "max":1284, + "min":1, + "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" + }, + "ChatResponseConfigurationDetail":{ + "type":"structure", + "members":{ + "responseConfigurations":{ + "shape":"ResponseConfigurations", + "documentation":"

        A collection of specific response configuration settings that collectively define how responses are generated, formatted, and presented to users in chat interactions.

        " + }, + "responseConfigurationSummary":{ + "shape":"String", + "documentation":"

        A summary of the response configuration details, providing a concise overview of the key parameters and settings that define the response generation behavior.

        " + }, + "status":{ + "shape":"ChatResponseConfigurationStatus", + "documentation":"

        The current status of the chat response configuration, indicating whether it is active, pending, or in another state that affects its availability for use.

        " + }, + "error":{"shape":"ErrorDetail"}, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp indicating when the detailed chat response configuration was last modified, helping administrators track changes and maintain version awareness.

        " + } + }, + "documentation":"

        Detailed information about a chat response configuration, including comprehensive settings and parameters that define how Amazon Q Business generates and formats responses.

        " + }, + "ChatResponseConfigurationId":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9-]{35}" + }, + "ChatResponseConfigurationStatus":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "FAILED", + "ACTIVE" + ] + }, + "ChatResponseConfigurations":{ + "type":"list", + "member":{"shape":"ChatResponseConfiguration"} + }, "ChatSyncInput":{ "type":"structure", "required":["applicationId"], @@ -3152,6 +3341,56 @@ } } }, + "CreateChatResponseConfigurationRequest":{ + "type":"structure", + "required":[ + "applicationId", + "displayName", + "responseConfigurations" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

        The unique identifier of the Amazon Q Business application for which to create the new chat response configuration.

        ", + "location":"uri", + "locationName":"applicationId" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

        A human-readable name for the new chat response configuration, making it easier to identify and manage among multiple configurations.

        " + }, + "clientToken":{ + "shape":"String", + "documentation":"

        A unique, case-sensitive identifier to ensure idempotency of the request. This helps prevent the same configuration from being created multiple times if retries occur.

        ", + "idempotencyToken":true + }, + "responseConfigurations":{ + "shape":"ResponseConfigurations", + "documentation":"

        A collection of response configuration settings that define how Amazon Q Business will generate and format responses to user queries in chat interactions.

        " + }, + "tags":{ + "shape":"Tags", + "documentation":"

        A list of key-value pairs to apply as tags to the new chat response configuration, enabling categorization and management of resources across Amazon Web Services services.

        " + } + } + }, + "CreateChatResponseConfigurationResponse":{ + "type":"structure", + "required":[ + "chatResponseConfigurationId", + "chatResponseConfigurationArn" + ], + "members":{ + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        The unique identifier assigned to a newly created chat response configuration, used for subsequent operations on this resource.

        " + }, + "chatResponseConfigurationArn":{ + "shape":"ChatResponseConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the newly created chat response configuration, which uniquely identifies the resource across all Amazon Web Services services.

        " + } + } + }, "CreateDataAccessorRequest":{ "type":"structure", "required":[ @@ -3184,6 +3423,10 @@ "shape":"DataAccessorName", "documentation":"

        A friendly name for the data accessor.

        " }, + "authenticationDetail":{ + "shape":"DataAccessorAuthenticationDetail", + "documentation":"

        The authentication configuration details for the data accessor. This specifies how the ISV will authenticate when accessing data through this data accessor.

        " + }, "tags":{ "shape":"Tags", "documentation":"

        The tags to associate with the data accessor.

        " @@ -3638,8 +3881,7 @@ "type":"structure", "required":[ "description", - "apiSchemaType", - "apiSchema" + "apiSchemaType" ], "members":{ "description":{ @@ -3702,6 +3944,10 @@ "shape":"PrincipalRoleArn", "documentation":"

        The Amazon Resource Name (ARN) of the IAM role for the ISV associated with this data accessor.

        " }, + "authenticationDetail":{ + "shape":"DataAccessorAuthenticationDetail", + "documentation":"

        The authentication configuration details for the data accessor. This specifies how the ISV authenticates when accessing data through this data accessor.

        " + }, "createdAt":{ "shape":"Timestamp", "documentation":"

        The timestamp when the data accessor was created.

        " @@ -3719,12 +3965,73 @@ "min":0, "pattern":"arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}" }, + "DataAccessorAuthenticationConfiguration":{ + "type":"structure", + "members":{ + "idcTrustedTokenIssuerConfiguration":{ + "shape":"DataAccessorIdcTrustedTokenIssuerConfiguration", + "documentation":"

        Configuration for IAM Identity Center Trusted Token Issuer (TTI) authentication used when the authentication type is AWS_IAM_IDC_TTI.

        " + } + }, + "documentation":"

        A union type that contains the specific authentication configuration based on the authentication type selected.

        ", + "union":true + }, + "DataAccessorAuthenticationDetail":{ + "type":"structure", + "required":["authenticationType"], + "members":{ + "authenticationType":{ + "shape":"DataAccessorAuthenticationType", + "documentation":"

        The type of authentication to use for the data accessor. This determines how the ISV authenticates when accessing data. You can use one of two authentication types:

        • AWS_IAM_IDC_TTI - Authentication using IAM Identity Center Trusted Token Issuer (TTI). This authentication type allows the ISV to use a trusted token issuer to generate tokens for accessing the data.

        • AWS_IAM_IDC_AUTH_CODE - Authentication using IAM Identity Center authorization code flow. This authentication type uses the standard OAuth 2.0 authorization code flow for authentication.

        " + }, + "authenticationConfiguration":{ + "shape":"DataAccessorAuthenticationConfiguration", + "documentation":"

        The specific authentication configuration based on the authentication type.

        " + }, + "externalIds":{ + "shape":"DataAccessorExternalIds", + "documentation":"

        A list of external identifiers associated with this authentication configuration. These are used to correlate the data accessor with external systems.

        " + } + }, + "documentation":"

        Contains the authentication configuration details for a data accessor. This structure defines how the ISV authenticates when accessing data through the data accessor.

        " + }, + "DataAccessorAuthenticationType":{ + "type":"string", + "documentation":"

        The type of authentication mechanism used by the data accessor.

        ", + "enum":[ + "AWS_IAM_IDC_TTI", + "AWS_IAM_IDC_AUTH_CODE" + ] + }, + "DataAccessorExternalId":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "DataAccessorExternalIds":{ + "type":"list", + "member":{"shape":"DataAccessorExternalId"}, + "max":1, + "min":1 + }, "DataAccessorId":{ "type":"string", "max":36, "min":36, "pattern":"[a-zA-Z0-9][a-zA-Z0-9-]{35}" }, + "DataAccessorIdcTrustedTokenIssuerConfiguration":{ + "type":"structure", + "required":["idcTrustedTokenIssuerArn"], + "members":{ + "idcTrustedTokenIssuerArn":{ + "shape":"IdcTrustedTokenIssuerArn", + "documentation":"

        The Amazon Resource Name (ARN) of the IAM Identity Center Trusted Token Issuer that will be used for authentication.

        " + } + }, + "documentation":"

        Configuration details for IAM Identity Center Trusted Token Issuer (TTI) authentication.

        " + }, "DataAccessorName":{ "type":"string", "max":100, @@ -3918,7 +4225,7 @@ "members":{ "boostingLevel":{ "shape":"DocumentAttributeBoostingLevel", - "documentation":"

        Specifies how much a document attribute is boosted.

        " + "documentation":"

        Specifies the priority tier ranking of boosting applied to document attributes. For version 2, this parameter indicates the relative ranking between boosted fields (ONE being highest priority, TWO being second highest, etc.) and determines the order in which attributes influence document ranking in search results. For version 1, this parameter specifies the boosting intensity. For version 2, boosting intensity (VERY HIGH, HIGH, MEDIUM, LOW, NONE) are not supported. Note that in version 2, you are not allowed to boost on only one field and make this value TWO.

        " }, "boostingDurationInSeconds":{ "shape":"BoostingDurationInSeconds", @@ -4000,6 +4307,32 @@ "members":{ } }, + "DeleteChatResponseConfigurationRequest":{ + "type":"structure", + "required":[ + "applicationId", + "chatResponseConfigurationId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

        The unique identifier of the Amazon Q Business application from which to delete the chat response configuration.

        ", + "location":"uri", + "locationName":"applicationId" + }, + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        The unique identifier of the chat response configuration to delete from the specified application.

        ", + "location":"uri", + "locationName":"chatResponseConfigurationId" + } + } + }, + "DeleteChatResponseConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteConversationRequest":{ "type":"structure", "required":[ @@ -4307,6 +4640,11 @@ "members":{ } }, + "DisplayName":{ + "type":"string", + "max":100, + "min":1 + }, "Document":{ "type":"structure", "required":["id"], @@ -4455,22 +4793,22 @@ "members":{ "numberConfiguration":{ "shape":"NumberAttributeBoostingConfiguration", - "documentation":"

        Provides information on boosting NUMBER type document attributes.

        " + "documentation":"

        Provides information on boosting NUMBER type document attributes.

        NUMBER attributes are not supported when using NativeIndexConfiguration version 2, which focuses on DATE attributes for recency and STRING attributes for source prioritization.

        " }, "stringConfiguration":{ "shape":"StringAttributeBoostingConfiguration", - "documentation":"

        Provides information on boosting STRING type document attributes.

        " + "documentation":"

        Provides information on boosting STRING type document attributes.

        Version 2 assigns priority tiers to STRING attributes, establishing clear hierarchical relationships with other boosted attributes.

        " }, "dateConfiguration":{ "shape":"DateAttributeBoostingConfiguration", - "documentation":"

        Provides information on boosting DATE type document attributes.

        " + "documentation":"

        Provides information on boosting DATE type document attributes.

        Version 2 assigns priority tiers to DATE attributes, establishing clear hierarchical relationships with other boosted attributes.

        " }, "stringListConfiguration":{ "shape":"StringListAttributeBoostingConfiguration", - "documentation":"

        Provides information on boosting STRING_LIST type document attributes.

        " + "documentation":"

        Provides information on boosting STRING_LIST type document attributes.

        STRING_LIST attributes are not supported when using NativeIndexConfiguration version 2, which focuses on DATE attributes for recency and STRING attributes for source prioritization.

        " } }, - "documentation":"

        Provides information on boosting supported Amazon Q Business document attribute types. When an end user chat query matches document attributes that have been boosted, Amazon Q Business prioritizes generating responses from content that matches the boosted document attributes.

        For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        ", + "documentation":"

        Provides information on boosting supported Amazon Q Business document attribute types. When an end user chat query matches document attributes that have been boosted, Amazon Q Business prioritizes generating responses from content that matches the boosted document attributes.

        In version 2, boosting uses numeric values (ONE, TWO) to indicate priority tiers that establish clear hierarchical relationships between boosted attributes. This allows for more precise control over how different attributes influence search results.

        For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        ", "union":true }, "DocumentAttributeBoostingLevel":{ @@ -4480,7 +4818,9 @@ "LOW", "MEDIUM", "HIGH", - "VERY_HIGH" + "VERY_HIGH", + "ONE", + "TWO" ] }, "DocumentAttributeBoostingOverrideMap":{ @@ -5001,6 +5341,56 @@ } } }, + "GetChatResponseConfigurationRequest":{ + "type":"structure", + "required":[ + "applicationId", + "chatResponseConfigurationId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

        The unique identifier of the Amazon Q Business application containing the chat response configuration to retrieve.

        ", + "location":"uri", + "locationName":"applicationId" + }, + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        The unique identifier of the chat response configuration to retrieve from the specified application.

        ", + "location":"uri", + "locationName":"chatResponseConfigurationId" + } + } + }, + "GetChatResponseConfigurationResponse":{ + "type":"structure", + "members":{ + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        The unique identifier of the retrieved chat response configuration.

        " + }, + "chatResponseConfigurationArn":{ + "shape":"ChatResponseConfigurationArn", + "documentation":"

        The Amazon Resource Name (ARN) of the retrieved chat response configuration, which uniquely identifies the resource across all Amazon Web Services services.

        " + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

        The human-readable name of the retrieved chat response configuration, making it easier to identify among multiple configurations.

        " + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp indicating when the chat response configuration was initially created.

        " + }, + "inUseConfiguration":{ + "shape":"ChatResponseConfigurationDetail", + "documentation":"

        The currently active configuration settings that are being used to generate responses in the Amazon Q Business application.

        " + }, + "lastUpdateConfiguration":{ + "shape":"ChatResponseConfigurationDetail", + "documentation":"

        Information about the most recent update to the configuration, including timestamp and modification details.

        " + } + } + }, "GetDataAccessorRequest":{ "type":"structure", "required":[ @@ -5053,6 +5443,10 @@ "shape":"ActionConfigurationList", "documentation":"

        The list of action configurations specifying the allowed actions and any associated filters.

        " }, + "authenticationDetail":{ + "shape":"DataAccessorAuthenticationDetail", + "documentation":"

        The authentication configuration details for the data accessor. This specifies how the ISV authenticates when accessing data through this data accessor.

        " + }, "createdAt":{ "shape":"Timestamp", "documentation":"

        The timestamp when the data accessor was created.

        " @@ -5718,7 +6112,7 @@ }, "lambdaArn":{ "shape":"LambdaArn", - "documentation":"

        The Amazon Resource Name (ARN) of the Lambda function sduring ingestion. For more information, see Using Lambda functions for Amazon Q Business document enrichment.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the Lambda function during ingestion. For more information, see Using Lambda functions for Amazon Q Business document enrichment.

        " }, "s3BucketName":{ "shape":"S3BucketName", @@ -5761,6 +6155,12 @@ }, "documentation":"

        Information about the IAM Identity Center Application used to configure authentication for a plugin.

        " }, + "IdcTrustedTokenIssuerArn":{ + "type":"string", + "max":1284, + "min":0, + "pattern":"arn:aws:sso::[0-9]{12}:trustedTokenIssuer/(sso)?ins-[a-zA-Z0-9-.]{16}/tti-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, "IdentityProviderConfiguration":{ "type":"structure", "members":{ @@ -5936,6 +6336,50 @@ "min":10, "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" }, + "Instruction":{ + "type":"string", + "max":1000, + "min":5, + "pattern":"[\\s\\S]*" + }, + "InstructionCollection":{ + "type":"structure", + "members":{ + "responseLength":{ + "shape":"Instruction", + "documentation":"

        Specifies the desired length of responses generated by Amazon Q Business. This parameter allows administrators to control whether responses are concise and brief or more detailed and comprehensive.

        " + }, + "targetAudience":{ + "shape":"Instruction", + "documentation":"

        Defines the intended audience for the responses, allowing Amazon Q Business to tailor its language, terminology, and explanations appropriately. This could range from technical experts to general users with varying levels of domain knowledge.

        " + }, + "perspective":{ + "shape":"Instruction", + "documentation":"

        Determines the point of view or perspective from which Amazon Q Business generates responses, such as first-person, second-person, or third-person perspective, affecting how information is presented to users.

        " + }, + "outputStyle":{ + "shape":"Instruction", + "documentation":"

        Specifies the formatting and structural style of responses, such as bullet points, paragraphs, step-by-step instructions, or other organizational formats that enhance readability and comprehension.

        " + }, + "identity":{ + "shape":"Instruction", + "documentation":"

        Defines the persona or identity that Amazon Q Business should adopt when responding to users, allowing for customization of the assistant's character, role, or representation within an organization.

        " + }, + "tone":{ + "shape":"Instruction", + "documentation":"

        Controls the emotional tone and communication style of responses, such as formal, casual, technical, friendly, or professional, to align with organizational communication standards and user expectations.

        " + }, + "customInstructions":{ + "shape":"Instruction", + "documentation":"

        Allows administrators to provide specific, custom instructions that guide how Amazon Q Business should respond in particular scenarios or to certain types of queries, enabling fine-grained control over response generation.

        " + }, + "examples":{ + "shape":"Instruction", + "documentation":"

        Provides sample responses or templates that Amazon Q Business can reference when generating responses, helping to establish consistent patterns and formats for different types of user queries.

        " + } + }, + "documentation":"

        A set of instructions that define how Amazon Q Business should generate and format responses to user queries. This collection includes parameters for controlling response characteristics such as length, audience targeting, perspective, style, identity, tone, and custom instructions.

        " + }, "Integer":{ "type":"integer", "box":true @@ -6072,6 +6516,43 @@ } } }, + "ListChatResponseConfigurationsRequest":{ + "type":"structure", + "required":["applicationId"], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

        The unique identifier of the Amazon Q Business application for which to list available chat response configurations.

        ", + "location":"uri", + "locationName":"applicationId" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

        The maximum number of chat response configurations to return in a single response. This parameter helps control pagination of results when many configurations exist.

        ", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A pagination token used to retrieve the next set of results when the number of configurations exceeds the specified maxResults value.

        ", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListChatResponseConfigurationsResponse":{ + "type":"structure", + "members":{ + "chatResponseConfigurations":{ + "shape":"ChatResponseConfigurations", + "documentation":"

        A list of chat response configuration summaries, each containing key information about an available configuration in the specified application.

        " + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

        A pagination token that can be used in a subsequent request to retrieve additional chat response configurations if the results were truncated due to the maxResults parameter.

        " + } + } + }, "ListConversationsRequest":{ "type":"structure", "required":["applicationId"], @@ -7116,6 +7597,10 @@ "shape":"IndexId", "documentation":"

        The identifier for the Amazon Q Business index.

        " }, + "version":{ + "shape":"Long", + "documentation":"

        A read-only field that specifies the version of the NativeIndexConfiguration.

        Amazon Q Business introduces enhanced document retrieval capabilities in version 2 of NativeIndexConfiguration, focusing on streamlined metadata boosting that prioritizes recency and source relevance to deliver more accurate responses to your queries. Version 2 has the following differences from version 1:

        • Version 2 supports a single Date field (created_at OR last_updated_at) for recency boosting

        • Version 2 supports a single String field with an ordered list of up to 5 values

        • Version 2 introduces number-based boost levels (ONE, TWO) alongside the text-based levels

        • Version 2 allows specifying prioritization between Date and String fields

        • Version 2 maintains backward compatibility with existing configurations

        " + }, "boostingOverride":{ "shape":"DocumentAttributeBoostingOverrideMap", "documentation":"

        Overrides the default boosts applied by Amazon Q Business to supported document attribute data types.

        " @@ -7145,14 +7630,14 @@ "members":{ "boostingLevel":{ "shape":"DocumentAttributeBoostingLevel", - "documentation":"

        Specifies the duration, in seconds, of a boost applies to a NUMBER type document attribute.

        " + "documentation":"

        Specifies the priority of boosted document attributes in relation to other boosted attributes. This parameter determines how strongly the attribute influences document ranking in search results. NUMBER attributes can serve as additional boosting factors when needed, but are not supported when using NativeIndexConfiguration version 2.

        " }, "boostingType":{ "shape":"NumberAttributeBoostingType", - "documentation":"

        Specifies how much a document attribute is boosted.

        " + "documentation":"

        Specifies whether higher or lower numeric values should be prioritized when boosting. Valid values are ASCENDING (higher numbers are more important) and DESCENDING (lower numbers are more important).

        " } }, - "documentation":"

        Provides information on boosting NUMBER type document attributes.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        " + "documentation":"

        Provides information on boosting NUMBER type document attributes.

        In the current boosting implementation, boosting focuses primarily on DATE attributes for recency and STRING attributes for source prioritization. NUMBER attributes can serve as additional boosting factors when needed, but are not supported when using NativeIndexConfiguration version 2.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        " }, "NumberAttributeBoostingType":{ "type":"string", @@ -7233,6 +7718,55 @@ "type":"string", "sensitive":true }, + "PermissionCondition":{ + "type":"structure", + "required":[ + "conditionOperator", + "conditionKey", + "conditionValues" + ], + "members":{ + "conditionOperator":{ + "shape":"PermissionConditionOperator", + "documentation":"

        The operator to use for the condition evaluation. This determines how the condition values are compared.

        " + }, + "conditionKey":{ + "shape":"PermissionConditionKey", + "documentation":"

        The key for the condition. This identifies the attribute that the condition applies to.

        " + }, + "conditionValues":{ + "shape":"PermissionConditionValues", + "documentation":"

        The values to compare against using the specified condition operator.

        " + } + }, + "documentation":"

        Defines a condition that restricts when a permission is effective. Conditions allow you to control access based on specific attributes of the request.

        " + }, + "PermissionConditionKey":{ + "type":"string", + "pattern":"aws:PrincipalTag/qbusiness-dataaccessor:[a-zA-Z]+.*" + }, + "PermissionConditionOperator":{ + "type":"string", + "enum":["StringEquals"] + }, + "PermissionConditionValue":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + }, + "PermissionConditionValues":{ + "type":"list", + "member":{"shape":"PermissionConditionValue"}, + "max":1, + "min":1 + }, + "PermissionConditions":{ + "type":"list", + "member":{"shape":"PermissionCondition"}, + "max":10, + "min":1 + }, "PersonalizationConfiguration":{ "type":"structure", "required":["personalizationControlMode"], @@ -7679,6 +8213,32 @@ }, "exception":true }, + "ResponseConfiguration":{ + "type":"structure", + "members":{ + "instructionCollection":{ + "shape":"InstructionCollection", + "documentation":"

        A collection of instructions that guide how Amazon Q Business generates responses, including parameters for response length, target audience, perspective, output style, identity, tone, and custom instructions.

        " + } + }, + "documentation":"

        Configuration settings to define how Amazon Q Business generates and formats responses to user queries. This includes customization options for response style, tone, length, and other characteristics.

        " + }, + "ResponseConfigurationSummary":{ + "type":"string", + "max":1000, + "min":1 + }, + "ResponseConfigurationType":{ + "type":"string", + "enum":["ALL"] + }, + "ResponseConfigurations":{ + "type":"map", + "key":{"shape":"ResponseConfigurationType"}, + "value":{"shape":"ResponseConfiguration"}, + "max":1, + "min":1 + }, "ResponseScope":{ "type":"string", "enum":[ @@ -8195,11 +8755,11 @@ "members":{ "boostingLevel":{ "shape":"DocumentAttributeBoostingLevel", - "documentation":"

        Specifies how much a document attribute is boosted.

        " + "documentation":"

        Specifies the priority tier ranking of boosting applied to document attributes. For version 2, this parameter indicates the relative ranking between boosted fields (ONE being highest priority, TWO being second highest, etc.) and determines the order in which attributes influence document ranking in search results. For version 1, this parameter specifies the boosting intensity. For version 2, boosting intensity (VERY HIGH, HIGH, MEDIUM, LOW, NONE) are not supported. Note that in version 2, you are not allowed to boost on only one field and make this value TWO.

        " }, "attributeValueBoosting":{ "shape":"StringAttributeValueBoosting", - "documentation":"

        Specifies specific values of a STRING type document attribute being boosted.

        " + "documentation":"

        Specifies specific values of a STRING type document attribute being boosted. When using NativeIndexConfiguration version 2, you can specify up to five values in order of priority.

        " } }, "documentation":"

        Provides information on boosting STRING type document attributes.

        For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        " @@ -8217,7 +8777,12 @@ "LOW", "MEDIUM", "HIGH", - "VERY_HIGH" + "VERY_HIGH", + "ONE", + "TWO", + "THREE", + "FOUR", + "FIVE" ] }, "StringListAttributeBoostingConfiguration":{ @@ -8226,10 +8791,10 @@ "members":{ "boostingLevel":{ "shape":"DocumentAttributeBoostingLevel", - "documentation":"

        Specifies how much a document attribute is boosted.

        " + "documentation":"

        Specifies the priority of boosted document attributes in relation to other boosted attributes. This parameter determines how strongly the attribute influences document ranking in search results. STRING_LIST attributes can serve as additional boosting factors when needed, but are not supported when using NativeIndexConfiguration version 2.

        " } }, - "documentation":"

        Provides information on boosting STRING_LIST type document attributes.

        For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        " + "documentation":"

        Provides information on boosting STRING_LIST type document attributes.

        In the current boosting implementation, boosting focuses primarily on DATE attributes for recency and STRING attributes for source prioritization. STRING_LIST attributes can serve as additional boosting factors when needed, but are not supported when using NativeIndexConfiguration version 2.

        For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

        For more information on how boosting document attributes work in Amazon Q Business, see Boosting using document attributes.

        " }, "SubnetId":{ "type":"string", @@ -8684,6 +9249,46 @@ "members":{ } }, + "UpdateChatResponseConfigurationRequest":{ + "type":"structure", + "required":[ + "applicationId", + "chatResponseConfigurationId", + "responseConfigurations" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

        The unique identifier of the Amazon Q Business application containing the chat response configuration to update.

        ", + "location":"uri", + "locationName":"applicationId" + }, + "chatResponseConfigurationId":{ + "shape":"ChatResponseConfigurationId", + "documentation":"

        The unique identifier of the chat response configuration to update within the specified application.

        ", + "location":"uri", + "locationName":"chatResponseConfigurationId" + }, + "displayName":{ + "shape":"DisplayName", + "documentation":"

        The new human-readable name to assign to the chat response configuration, making it easier to identify among multiple configurations.

        " + }, + "responseConfigurations":{ + "shape":"ResponseConfigurations", + "documentation":"

        The updated collection of response configuration settings that define how Amazon Q Business generates and formats responses to user queries.

        " + }, + "clientToken":{ + "shape":"String", + "documentation":"

        A unique, case-sensitive identifier to ensure idempotency of the request. This helps prevent the same update from being processed multiple times if retries occur.

        ", + "idempotencyToken":true + } + } + }, + "UpdateChatResponseConfigurationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateDataAccessorRequest":{ "type":"structure", "required":[ @@ -8708,6 +9313,10 @@ "shape":"ActionConfigurationList", "documentation":"

        The updated list of action configurations specifying the allowed actions and any associated filters.

        " }, + "authenticationDetail":{ + "shape":"DataAccessorAuthenticationDetail", + "documentation":"

        The updated authentication configuration details for the data accessor. This specifies how the ISV will authenticate when accessing data through this data accessor.

        " + }, "displayName":{ "shape":"DataAccessorName", "documentation":"

        The updated friendly name for the data accessor.

        " diff --git a/services/qconnect/pom.xml b/services/qconnect/pom.xml index 3a14870ad7a8..0c8e1466405b 100644 --- a/services/qconnect/pom.xml +++ b/services/qconnect/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT qconnect AWS Java SDK :: Services :: Q Connect diff --git a/services/qconnect/src/main/resources/codegen-resources/customization.config b/services/qconnect/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/qconnect/src/main/resources/codegen-resources/customization.config +++ b/services/qconnect/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/qconnect/src/main/resources/codegen-resources/service-2.json b/services/qconnect/src/main/resources/codegen-resources/service-2.json index f594a4190180..d2a52df12f1a 100644 --- a/services/qconnect/src/main/resources/codegen-resources/service-2.json +++ b/services/qconnect/src/main/resources/codegen-resources/service-2.json @@ -44,6 +44,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -64,9 +65,10 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Creates an Amazon Q in Connect AI Agent version.

        ", "idempotent":true @@ -84,6 +86,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -104,9 +107,10 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Creates an Amazon Q in Connect AI Guardrail version.

        ", "idempotent":true @@ -124,6 +128,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -144,9 +149,10 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Creates an Amazon Q in Connect AI Prompt version.

        ", "idempotent":true @@ -164,6 +170,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"} ], "documentation":"

        Creates an Amazon Q in Connect assistant.

        ", @@ -201,6 +208,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -220,6 +228,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -239,6 +248,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"} ], "documentation":"

        Creates a knowledge base.

        When using this API, you cannot reuse Amazon AppIntegrations DataIntegrations with external knowledge bases such as Salesforce and ServiceNow. If you do, you'll get an InvalidRequestException error.

        For example, you're programmatically managing your external knowledge base, and you want to add or remove one of the fields that is being ingested from Salesforce. Do the following:

        1. Call DeleteKnowledgeBase.

        2. Call DeleteDataIntegration.

        3. Call CreateDataIntegration to recreate the DataIntegration or create a different one.

        4. Call CreateKnowledgeBase.

        ", @@ -277,6 +287,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -316,6 +327,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -334,6 +346,7 @@ "errors":[ {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -369,6 +382,7 @@ "output":{"shape":"DeleteAIAgentResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -386,8 +400,9 @@ "input":{"shape":"DeleteAIAgentVersionRequest"}, "output":{"shape":"DeleteAIAgentVersionResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -405,8 +420,9 @@ "input":{"shape":"DeleteAIGuardrailRequest"}, "output":{"shape":"DeleteAIGuardrailResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -424,8 +440,9 @@ "input":{"shape":"DeleteAIGuardrailVersionRequest"}, "output":{"shape":"DeleteAIGuardrailVersionResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, 
{"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -444,6 +461,7 @@ "output":{"shape":"DeleteAIPromptResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -461,8 +479,9 @@ "input":{"shape":"DeleteAIPromptVersionRequest"}, "output":{"shape":"DeleteAIPromptVersionResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -481,6 +500,7 @@ "output":{"shape":"DeleteAssistantResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -498,6 +518,7 @@ "output":{"shape":"DeleteAssistantAssociationResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -514,8 +535,9 @@ "input":{"shape":"DeleteContentRequest"}, "output":{"shape":"DeleteContentResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -533,6 +555,7 @@ "output":{"shape":"DeleteContentAssociationResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -549,8 +572,9 @@ "input":{"shape":"DeleteImportJobRequest"}, "output":{"shape":"DeleteImportJobResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + 
{"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -569,6 +593,7 @@ "errors":[ {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -624,6 +649,7 @@ "output":{"shape":"DeleteQuickResponseResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -641,6 +667,7 @@ "output":{"shape":"GetAIAgentResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -658,6 +685,7 @@ "output":{"shape":"GetAIGuardrailResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -675,6 +703,7 @@ "output":{"shape":"GetAIPromptResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -692,6 +721,7 @@ "output":{"shape":"GetAssistantResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -708,6 +738,7 @@ "output":{"shape":"GetAssistantAssociationResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -724,6 +755,7 @@ "output":{"shape":"GetContentResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -740,6 +772,7 @@ 
"output":{"shape":"GetContentAssociationResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -756,6 +789,7 @@ "output":{"shape":"GetContentSummaryResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -788,6 +822,7 @@ "output":{"shape":"GetKnowledgeBaseResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -804,6 +839,7 @@ "output":{"shape":"GetMessageTemplateResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -837,6 +873,7 @@ "output":{"shape":"GetQuickResponseResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -856,7 +893,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

        This API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.

        Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

        ", + "documentation":"

        This API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.

        Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

        ", "deprecated":true, "deprecatedMessage":"GetRecommendations API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, @@ -871,6 +908,7 @@ "output":{"shape":"GetSessionResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -887,9 +925,10 @@ "output":{"shape":"ListAIAgentVersionsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        List AI Agent versions.

        " }, @@ -904,9 +943,10 @@ "output":{"shape":"ListAIAgentsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Lists AI Agents.

        " }, @@ -921,9 +961,10 @@ "output":{"shape":"ListAIGuardrailVersionsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Lists AI Guardrail versions.

        " }, @@ -938,6 +979,7 @@ "output":{"shape":"ListAIGuardrailsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -955,9 +997,10 @@ "output":{"shape":"ListAIPromptVersionsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Lists AI Prompt versions.

        " }, @@ -972,9 +1015,10 @@ "output":{"shape":"ListAIPromptsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} ], "documentation":"

        Lists the AI Prompts available on the Amazon Q in Connect assistant.

        " }, @@ -1005,6 +1049,7 @@ "output":{"shape":"ListAssistantsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"} ], "documentation":"

        Lists information about assistants.

        " @@ -1020,6 +1065,7 @@ "output":{"shape":"ListContentAssociationsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1200,7 +1246,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

        This API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.

        Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

        ", + "documentation":"

        This API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024, you will need to create a new Assistant in the Amazon Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.

        Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

        ", "deprecated":true, "deprecatedMessage":"QueryAssistant API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, @@ -1266,6 +1312,7 @@ "output":{"shape":"SearchContentResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1282,6 +1329,7 @@ "output":{"shape":"SearchMessageTemplatesResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -1300,6 +1348,7 @@ "errors":[ {"shape":"RequestTimeoutException"}, {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1316,6 +1365,7 @@ "output":{"shape":"SearchSessionsResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1332,8 +1382,8 @@ "output":{"shape":"SendMessageResponse"}, "errors":[ {"shape":"RequestTimeoutException"}, - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -1352,6 +1402,7 @@ "output":{"shape":"StartContentUploadResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1370,6 +1421,7 @@ {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, 
{"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1417,8 +1469,9 @@ "input":{"shape":"UpdateAIAgentRequest"}, "output":{"shape":"UpdateAIAgentResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -1436,8 +1489,9 @@ "input":{"shape":"UpdateAIGuardrailRequest"}, "output":{"shape":"UpdateAIGuardrailResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -1455,8 +1509,9 @@ "input":{"shape":"UpdateAIPromptRequest"}, "output":{"shape":"UpdateAIPromptResponse"}, "errors":[ - {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} @@ -1492,6 +1547,7 @@ "output":{"shape":"UpdateContentResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"PreconditionFailedException"}, {"shape":"ResourceNotFoundException"} @@ -1562,6 +1618,7 @@ "errors":[ {"shape":"ConflictException"}, {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"PreconditionFailedException"}, {"shape":"ResourceNotFoundException"} @@ -1579,6 +1636,7 @@ "output":{"shape":"UpdateSessionResponse"}, "errors":[ {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -1595,6 +1653,7 @@ "output":{"shape":"UpdateSessionDataResponse"}, "errors":[ 
{"shape":"ValidationException"}, + {"shape":"UnauthorizedException"}, {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], @@ -2376,7 +2435,7 @@ }, "locale":{ "shape":"NonEmptyString", - "documentation":"

        The locale to which specifies the language and region settings that determine the response language for QueryAssistant.

        Changing this locale to anything other than en_US, en_GB, or en_AU will turn off recommendations triggered by contact transcripts for agent assistance, as this feature is not supported in multiple languages.

        " + "documentation":"

        The locale that specifies the language and region settings that determine the response language for QueryAssistant.

        For more information on supported locales, see Language support for Amazon Q in Connect.

        " } }, "documentation":"

        The configuration for the ANSWER_RECOMMENDATION AI Agent type.

        " @@ -7244,7 +7303,7 @@ }, "locale":{ "shape":"NonEmptyString", - "documentation":"

        The locale to which specifies the language and region settings that determine the response language for QueryAssistant.

        " + "documentation":"

        The locale that specifies the language and region settings that determine the response language for QueryAssistant.

        For more information on supported locales, see Language support for Amazon Q in Connect.

        " } }, "documentation":"

        The configuration for the MANUAL_SEARCH AI Agent type.

        " @@ -9973,6 +10032,18 @@ }, "exception":true }, + "UnauthorizedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

        You do not have permission to perform this action.

        ", + "error":{ + "httpStatusCode":401, + "senderFault":true + }, + "exception":true + }, "UntagResourceRequest":{ "type":"structure", "required":[ diff --git a/services/qldb/pom.xml b/services/qldb/pom.xml index acbe9a6d51ad..22de73a69d75 100644 --- a/services/qldb/pom.xml +++ b/services/qldb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT qldb AWS Java SDK :: Services :: QLDB diff --git a/services/qldb/src/main/resources/codegen-resources/customization.config b/services/qldb/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/qldb/src/main/resources/codegen-resources/customization.config +++ b/services/qldb/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/qldbsession/pom.xml b/services/qldbsession/pom.xml index a3f99059d8d7..71697541c48c 100644 --- a/services/qldbsession/pom.xml +++ b/services/qldbsession/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT qldbsession AWS Java SDK :: Services :: QLDB Session diff --git a/services/qldbsession/src/main/resources/codegen-resources/customization.config b/services/qldbsession/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/qldbsession/src/main/resources/codegen-resources/customization.config +++ b/services/qldbsession/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/quicksight/pom.xml b/services/quicksight/pom.xml index 6300e7d7857f..cdb8f8c2d5c9 100644 --- a/services/quicksight/pom.xml +++ b/services/quicksight/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT quicksight AWS Java SDK :: Services :: QuickSight diff --git a/services/quicksight/src/main/resources/codegen-resources/customization.config b/services/quicksight/src/main/resources/codegen-resources/customization.config index 6947a48f8f00..788e9cb8d991 100644 --- a/services/quicksight/src/main/resources/codegen-resources/customization.config +++ b/services/quicksight/src/main/resources/codegen-resources/customization.config @@ -145,6 +145,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/quicksight/src/main/resources/codegen-resources/service-2.json b/services/quicksight/src/main/resources/codegen-resources/service-2.json index 169a23594cc6..16a29be392b3 100644 --- a/services/quicksight/src/main/resources/codegen-resources/service-2.json +++ b/services/quicksight/src/main/resources/codegen-resources/service-2.json @@ -4209,8 +4209,7 @@ }, "AllSheetsFilterScopeConfiguration":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty object that represents that the AllSheets option is the chosen value for the FilterScopeConfiguration parameter. This structure applies the filter to all visuals on all sheets of an Analysis, Dashboard, or Template.

        This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

        " }, "AltText":{ @@ -6118,6 +6117,10 @@ "RoleArn":{ "shape":"RoleArn", "documentation":"

        Use the RoleArn structure to override an account-wide role for a specific Athena data source. For example, say an account administrator has turned off all Athena access with an account-wide role. The administrator can then use RoleArn to bypass the account-wide role and allow Athena access for the single Athena data source that is specified in the structure, even if the account-wide role forbidding Athena access is still active.

        " + }, + "IdentityCenterConfiguration":{ + "shape":"IdentityCenterConfiguration", + "documentation":"

        An optional parameter that configures IAM Identity Center authentication to grant Amazon QuickSight access to your workgroup.

        This parameter can only be specified if your Amazon QuickSight account is configured with IAM Identity Center.

        " } }, "documentation":"

        Parameters for Amazon Athena.

        " @@ -6286,8 +6289,7 @@ }, "AxisDisplayDataDrivenRange":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The options that are saved for future extension.

        " }, "AxisDisplayMinMaxRange":{ @@ -7450,11 +7452,19 @@ "members":{ "ExportToCsv":{ "shape":"CapabilityState", - "documentation":"

        The ability to export to CSV files.

        " + "documentation":"

        The ability to export to CSV files from the UI.

        " }, "ExportToExcel":{ "shape":"CapabilityState", - "documentation":"

        The ability to export to Excel files.

        " + "documentation":"

        The ability to export to Excel files from the UI.

        " + }, + "ExportToPdf":{ + "shape":"CapabilityState", + "documentation":"

        The ability to export to PDF files from the UI.

        " + }, + "PrintReports":{ + "shape":"CapabilityState", + "documentation":"

        The ability to print reports.

        " }, "CreateAndUpdateThemes":{ "shape":"CapabilityState", @@ -7515,6 +7525,22 @@ "CreateSPICEDataset":{ "shape":"CapabilityState", "documentation":"

        The ability to create a SPICE dataset.

        " + }, + "ExportToPdfInScheduledReports":{ + "shape":"CapabilityState", + "documentation":"

        The ability to export to PDF files in scheduled email reports.

        " + }, + "ExportToCsvInScheduledReports":{ + "shape":"CapabilityState", + "documentation":"

        The ability to export to CSV files in scheduled email reports.

        " + }, + "ExportToExcelInScheduledReports":{ + "shape":"CapabilityState", + "documentation":"

        The ability to export to Excel files in scheduled email reports.

        " + }, + "IncludeContentInScheduledReportsEmail":{ + "shape":"CapabilityState", + "documentation":"

        The ability to include content in scheduled email reports.

        " } }, "documentation":"

        A set of actions that correspond to Amazon QuickSight permissions.

        " @@ -11652,7 +11678,7 @@ }, "DataSetId":{ "shape":"ResourceId", - "documentation":"

        The ID of the dataset.

        " + "documentation":"

        The ID of the dataset. Limited to 96 characters.

        " }, "Name":{ "shape":"ResourceName", @@ -30041,7 +30067,8 @@ "type":"string", "enum":[ "REDSHIFT", - "QBUSINESS" + "QBUSINESS", + "ATHENA" ] }, "SessionLifetimeInMinutes":{ @@ -30515,6 +30542,10 @@ "Content":{ "shape":"SheetTextBoxContent", "documentation":"

        The content that is displayed in the text box.

        " + }, + "Interactions":{ + "shape":"TextBoxInteractionOptions", + "documentation":"

        The general textbox interactions setup for a textbox.

        " } }, "documentation":"

        A text box.

        " @@ -32802,6 +32833,26 @@ }, "documentation":"

        The display options of a control.

        " }, + "TextBoxInteractionOptions":{ + "type":"structure", + "members":{ + "TextBoxMenuOption":{ + "shape":"TextBoxMenuOption", + "documentation":"

        The menu options for the textbox.

        " + } + }, + "documentation":"

        The general textbox interactions setup for textbox publish options.

        " + }, + "TextBoxMenuOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{ + "shape":"DashboardBehavior", + "documentation":"

        The availability status of the textbox menu. If the value of this property is set to ENABLED, dashboard readers can interact with the textbox menu.

        " + } + }, + "documentation":"

        The menu options for the interactions of a textbox.

        " + }, "TextConditionalFormat":{ "type":"structure", "members":{ diff --git a/services/ram/pom.xml b/services/ram/pom.xml index daa417fc3348..2484ed833603 100644 --- a/services/ram/pom.xml +++ b/services/ram/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ram AWS Java SDK :: Services :: RAM diff --git a/services/ram/src/main/resources/codegen-resources/customization.config b/services/ram/src/main/resources/codegen-resources/customization.config index 620b218509c5..0aab6ae18cac 100644 --- a/services/ram/src/main/resources/codegen-resources/customization.config +++ b/services/ram/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "getResourceShareInvitations", "enableSharingWithAwsOrganization" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/rbin/pom.xml b/services/rbin/pom.xml index 343367761eb6..f057530ed5ce 100644 --- a/services/rbin/pom.xml +++ b/services/rbin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rbin AWS Java SDK :: Services :: Rbin diff --git a/services/rbin/src/main/resources/codegen-resources/customization.config b/services/rbin/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/rbin/src/main/resources/codegen-resources/customization.config +++ b/services/rbin/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/rds/pom.xml b/services/rds/pom.xml index a5f9072e51f2..bf06d885ed30 100644 --- a/services/rds/pom.xml +++ b/services/rds/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rds AWS Java SDK :: Services :: Amazon RDS diff --git 
a/services/rds/src/main/resources/codegen-resources/paginators-1.json b/services/rds/src/main/resources/codegen-resources/paginators-1.json index 5e651ddc195d..596864afe876 100644 --- a/services/rds/src/main/resources/codegen-resources/paginators-1.json +++ b/services/rds/src/main/resources/codegen-resources/paginators-1.json @@ -78,6 +78,12 @@ "output_token": "Marker", "result_key": "DescribeDBLogFiles" }, + "DescribeDBMajorEngineVersions": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "DBMajorEngineVersions" + }, "DescribeDBParameterGroups": { "input_token": "Marker", "limit_key": "MaxRecords", diff --git a/services/rds/src/main/resources/codegen-resources/service-2.json b/services/rds/src/main/resources/codegen-resources/service-2.json index 69a6c9a1f0dc..ef0963fa7819 100644 --- a/services/rds/src/main/resources/codegen-resources/service-2.json +++ b/services/rds/src/main/resources/codegen-resources/service-2.json @@ -462,7 +462,7 @@ {"shape":"TenantDatabaseQuotaExceededFault"}, {"shape":"CertificateNotFoundFault"} ], - "documentation":"

        Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.

        Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation.

        All read replica DB instances are created with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.

        Your source DB instance or cluster must have backup retention enabled.

        " + "documentation":"

        Creates a new DB instance that acts as a read replica for an existing source DB instance or Multi-AZ DB cluster. You can create a read replica for a DB instance running MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server. You can create a read replica for a Multi-AZ DB cluster running MySQL or PostgreSQL. For more information, see Working with read replicas and Migrating from a Multi-AZ DB cluster to a DB instance using a read replica in the Amazon RDS User Guide.

        Amazon RDS for Db2 supports this operation for standby replicas. To create a standby replica for a DB instance running Db2, you must set ReplicaMode to mounted.

        Amazon Aurora doesn't support this operation. To create a DB instance for an Aurora DB cluster, use the CreateDBInstance operation.

        RDS creates read replicas with backups disabled. All other attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance or cluster, except as specified.

        Your source DB instance or cluster must have backup retention enabled.

        " }, "CreateDBParameterGroup":{ "name":"CreateDBParameterGroup", @@ -1302,6 +1302,19 @@ ], "documentation":"

        Returns a list of DB log files for the DB instance.

        This command doesn't apply to RDS Custom.

        " }, + "DescribeDBMajorEngineVersions":{ + "name":"DescribeDBMajorEngineVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBMajorEngineVersionsRequest"}, + "output":{ + "shape":"DescribeDBMajorEngineVersionsResponse", + "resultWrapper":"DescribeDBMajorEngineVersionsResult" + }, + "documentation":"

        Describes the properties of specific major versions of DB engines.

        " + }, "DescribeDBParameterGroups":{ "name":"DescribeDBParameterGroups", "http":{ @@ -1799,7 +1812,7 @@ {"shape":"DBInstanceNotReadyFault"}, {"shape":"DBLogFileNotFoundFault"} ], - "documentation":"

        Downloads all or a portion of the specified log file, up to 1 MB in size.

        This command doesn't apply to RDS Custom.

        " + "documentation":"

        Downloads all or a portion of the specified log file, up to 1 MB in size.

        This command doesn't apply to RDS Custom.

        This operation uses resources on database instances. Because of this, we recommend publishing database logs to CloudWatch and then using the GetLogEvents operation. For more information, see GetLogEvents in the Amazon CloudWatch Logs API Reference.

        " }, "EnableHttpEndpoint":{ "name":"EnableHttpEndpoint", @@ -2836,7 +2849,8 @@ "errors":[ {"shape":"DBClusterNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"}, - {"shape":"InvalidDBInstanceStateFault"} + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBShardGroupStateFault"} ], "documentation":"

        Starts an Amazon Aurora DB cluster that was stopped using the Amazon Web Services console, the stop-db-cluster CLI command, or the StopDBCluster operation.

        For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.

        This operation only applies to Aurora DB clusters.

        " }, @@ -2945,7 +2959,8 @@ "errors":[ {"shape":"DBClusterNotFoundFault"}, {"shape":"InvalidDBClusterStateFault"}, - {"shape":"InvalidDBInstanceStateFault"} + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InvalidDBShardGroupStateFault"} ], "documentation":"

        Stops an Amazon Aurora DB cluster. When you stop a DB cluster, Aurora retains the DB cluster's metadata, including its endpoints and DB parameter groups. Aurora also retains the transaction logs so you can do a point-in-time restore if necessary.

        For more information, see Stopping and Starting an Aurora Cluster in the Amazon Aurora User Guide.

        This operation only applies to Aurora DB clusters.

        " }, @@ -3737,7 +3752,7 @@ }, "ConnectionBorrowTimeout":{ "shape":"IntegerOptional", - "documentation":"

        The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.

        Default: 120

        Constraints:

        • Must be between 0 and 3600.

        " + "documentation":"

        The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.

        Default: 120

        Constraints:

        • Must be between 0 and 300.

        " }, "SessionPinningFilters":{ "shape":"StringList", @@ -3745,7 +3760,7 @@ }, "InitQuery":{ "shape":"String", - "documentation":"

        Add an initialization query, or modify the current one. You can specify one or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. Make sure that the query you add is valid. To include multiple variables in a single SET statement, use comma separators.

        For example: SET variable1=value1, variable2=value2

        For multiple statements, use semicolons as the separator.

        Default: no initialization query

        " + "documentation":"

        Add an initialization query, or modify the current one. You can specify one or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. Make sure the query added here is valid. This is an optional field, so you can choose to leave it empty. For including multiple variables in a single SET statement, use a comma separator.

        For example: SET variable1=value1, variable2=value2

        Default: no initialization query

        Since you can access initialization query as part of target group configuration, it is not protected by authentication or cryptographic methods. Anyone with access to view or manage your proxy target group configuration can view the initialization query. You should not add sensitive data, such as passwords or long-lived encryption keys, to this option.

        " } }, "documentation":"

        Specifies the settings that control the size and behavior of the connection pool associated with a DBProxyTargetGroup.

        " @@ -3771,7 +3786,7 @@ }, "InitQuery":{ "shape":"String", - "documentation":"

        One or more SQL statements for the proxy to run when opening each new database connection. Typically used with SET statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single SET statement, such as SET x=1, y=2.

        " + "documentation":"

        One or more SQL statements for the proxy to run when opening each new database connection. The setting is typically used with SET statements to make sure that each connection has identical settings. The query added here must be valid. For including multiple variables in a single SET statement, use a comma separator. This is an optional field.

        For example: SET variable1=value1, variable2=value2

        Since you can access initialization query as part of target group configuration, it is not protected by authentication or cryptographic methods. Anyone with access to view or manage your proxy target group configuration can view the initialization query. You should not add sensitive data, such as passwords or long-lived encryption keys, to this option.

        " } }, "documentation":"

        Displays the settings that control the size and behavior of the connection pool associated with a DBProxyTarget.

        " @@ -3929,6 +3944,14 @@ "CopyOptionGroup":{ "shape":"BooleanOptional", "documentation":"

        Specifies whether to copy the DB option group associated with the source DB snapshot to the target Amazon Web Services account and associate with the target DB snapshot. The associated option group can be copied only with cross-account snapshot copy calls.

        " + }, + "SnapshotAvailabilityZone":{ + "shape":"String", + "documentation":"

        Specifies the name of the Availability Zone where RDS stores the DB snapshot. This value is valid only for snapshots that RDS stores on a Dedicated Local Zone.

        " + }, + "SnapshotTarget":{ + "shape":"String", + "documentation":"

        Configures the location where RDS will store copied snapshots.

        Valid Values:

        • local (Dedicated Local Zone)

        • outposts (Amazon Web Services Outposts)

        • region (Amazon Web Services Region)

        " } }, "documentation":"

        " @@ -4353,7 +4376,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } }, "documentation":"

        " @@ -4499,7 +4522,7 @@ }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

        Specifies whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

        This setting doesn't apply to the following DB instances:

        • Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)

        • RDS Custom

        " + "documentation":"

        Specifies whether the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment.

        This setting doesn't apply to Amazon Aurora because the DB instance Availability Zones (AZs) are managed by the DB cluster.

        " }, "EngineVersion":{ "shape":"String", @@ -4651,7 +4674,7 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

        The location for storing automated backups and manual snapshots.

        Valid Values:

        • outposts (Amazon Web Services Outposts)

        • region (Amazon Web Services Region)

        Default: region

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " + "documentation":"

        The location for storing automated backups and manual snapshots.

        Valid Values:

        • local (Dedicated Local Zone)

        • outposts (Amazon Web Services Outposts)

        • region (Amazon Web Services Region)

        Default: region

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " }, "NetworkType":{ "shape":"String", @@ -4687,7 +4710,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } }, "documentation":"

        " @@ -4702,7 +4725,7 @@ }, "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

        The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, with the exception of Oracle and SQL Server, which can have up to five.

        Constraints:

        • Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server DB instance.

        • Can't be specified if the SourceDBClusterIdentifier parameter is also specified.

        • For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide.

        • For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide.

        • The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.

        • If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier.

        • If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas.

        " + "documentation":"

        The identifier of the DB instance that will act as the source for the read replica. Each DB instance can have up to 15 read replicas, except for the following engines:

        • Db2 - Can have up to three replicas.

        • Oracle - Can have up to five read replicas.

        • SQL Server - Can have up to five read replicas.

        Constraints:

        • Must be the identifier of an existing Db2, MariaDB, MySQL, Oracle, PostgreSQL, or SQL Server DB instance.

        • Can't be specified if the SourceDBClusterIdentifier parameter is also specified.

        • For the limitations of Oracle read replicas, see Version and licensing considerations for RDS for Oracle replicas in the Amazon RDS User Guide.

        • For the limitations of SQL Server read replicas, see Read replica limitations with SQL Server in the Amazon RDS User Guide.

        • The specified DB instance must have automatic backups enabled, that is, its backup retention period must be greater than 0.

        • If the source DB instance is in the same Amazon Web Services Region as the read replica, specify a valid DB instance identifier.

        • If the source DB instance is in a different Amazon Web Services Region from the read replica, specify a valid DB instance ARN. For more information, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. This doesn't apply to SQL Server or RDS Custom, which don't support cross-Region replicas.

        " }, "DBInstanceClass":{ "shape":"String", @@ -4734,7 +4757,7 @@ }, "DBParameterGroupName":{ "shape":"String", - "documentation":"

        The name of the DB parameter group to associate with this read replica DB instance.

        For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

        For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup.

        Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

        Constraints:

        • Must be 1 to 255 letters, numbers, or hyphens.

        • First character must be a letter.

        • Can't end with a hyphen or contain two consecutive hyphens.

        " + "documentation":"

        The name of the DB parameter group to associate with this read replica DB instance.

        For the Db2 DB engine, if your source DB instance uses the Bring Your Own License model, then a custom parameter group must be associated with the replica. For a same Amazon Web Services Region replica, if you don't specify a custom parameter group, Amazon RDS associates the custom parameter group associated with the source DB instance. For a cross-Region replica, you must specify a custom parameter group. This custom parameter group must include your IBM Site ID and IBM Customer ID. For more information, see IBM IDs for Bring Your Own License for Db2.

        For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica.

        For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup.

        Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, for Db2 DB instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

        Constraints:

        • Must be 1 to 255 letters, numbers, or hyphens.

        • First character must be a letter.

        • Can't end with a hyphen or contain two consecutive hyphens.

        " }, "PubliclyAccessible":{ "shape":"BooleanOptional", @@ -4835,7 +4858,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        The open mode of the replica database: mounted or read-only.

        This parameter is only supported for Oracle DB instances.

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " + "documentation":"

        The open mode of the replica database.

        This parameter is only supported for Db2 DB instances and Oracle DB instances.

        Db2

        Standby DB replicas are included in Db2 Advanced Edition (AE) and Db2 Standard Edition (SE). The main use case for standby replicas is cross-Region disaster recovery. Because it doesn't accept user connections, a standby replica can't serve a read-only workload.

        You can create a combination of standby and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Db2 in the Amazon RDS User Guide.

        To create standby DB replicas for RDS for Db2, set this parameter to mounted.

        Oracle

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " }, "MaxAllocatedStorage":{ "shape":"IntegerOptional", @@ -4857,6 +4880,10 @@ "shape":"BooleanOptional", "documentation":"

        Specifies whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts read replica.

        A CoIP provides local or external connectivity to resources in your Outpost subnets through your on-premises network. For some use cases, a CoIP can provide lower latency for connections to the read replica from outside of its virtual private cloud (VPC) on your local network.

        For more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        For more information about CoIPs, see Customer-owned IP addresses in the Amazon Web Services Outposts User Guide.

        " }, + "BackupTarget":{ + "shape":"String", + "documentation":"

        The location where RDS stores automated backups and manual snapshots.

        Valid Values:

        • local for Dedicated Local Zones

        • region for Amazon Web Services Region

        " + }, "AllocatedStorage":{ "shape":"IntegerOptional", "documentation":"

        The amount of storage (in gibibytes) to allocate initially for the read replica. Follow the allocation rules specified in CreateDBInstance.

        This setting isn't valid for RDS for SQL Server.

        Be sure to allocate enough storage for your read replica so that the create operation can succeed. You can also allocate additional storage for future growth.

        " @@ -5071,7 +5098,7 @@ }, "ComputeRedundancy":{ "shape":"IntegerOptional", - "documentation":"

        Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

        • 0 - Creates a DB shard group without a standby DB shard group. This is the default value.

        • 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

        • 2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

        " + "documentation":"

        Specifies whether to create standby DB data access shards for the DB shard group. Valid values are the following:

        • 0 - Creates a DB shard group without a standby DB data access shard. This is the default value.

        • 1 - Creates a DB shard group with a standby DB data access shard in a different Availability Zone (AZ).

        • 2 - Creates a DB shard group with two standby DB data access shards in two different AZs.

        " }, "MaxACU":{ "shape":"DoubleOptional", @@ -5208,7 +5235,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this global database cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the global cluster will fail if the DB major version is past its end of standard support date.

        This setting only applies to Aurora PostgreSQL-based global databases.

        You can use this setting to enroll your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon Aurora User Guide.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this global database cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the global cluster will fail if the DB major version is past its end of standard support date.

        This setting only applies to Aurora PostgreSQL-based global databases.

        You can use this setting to enroll your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon Aurora in the Amazon Aurora User Guide.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " }, "DeletionProtection":{ "shape":"BooleanOptional", @@ -5665,6 +5692,10 @@ "documentation":"

        The Active Directory Domain membership records associated with the DB cluster.

        " }, "TagList":{"shape":"TagList"}, + "GlobalClusterIdentifier":{ + "shape":"GlobalClusterIdentifier", + "documentation":"

        Contains a user-supplied global database cluster identifier. This identifier is the unique key that identifies a global database cluster.

        " + }, "GlobalWriteForwardingStatus":{ "shape":"WriteForwardingStatus", "documentation":"

        The status of write forwarding for a secondary cluster in an Aurora global database.

        " @@ -5761,7 +5792,7 @@ "CertificateDetails":{"shape":"CertificateDetails"}, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for the DB cluster.

        For more information, see CreateDBCluster.

        " + "documentation":"

        The lifecycle type for the DB cluster.

        For more information, see CreateDBCluster.

        " } }, "documentation":"

        Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.

        For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

        For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

        For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

        For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

        ", @@ -6836,7 +6867,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        The open mode of an Oracle read replica. The default is open-read-only. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        This attribute is only supported in RDS for Oracle.

        " + "documentation":"

        The open mode of a Db2 or an Oracle read replica. The default is open-read-only. For more information, see Working with read replicas for Amazon RDS for Db2 and Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        This attribute is only supported in RDS for Db2, RDS for Oracle, and RDS Custom for Oracle.

        " }, "LicenseModel":{ "shape":"String", @@ -7025,7 +7056,7 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

        The location where automated backups and manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region.

        " + "documentation":"

        The location where automated backups and manual snapshots are stored: Dedicated Local Zones, Amazon Web Services Outposts, or the Amazon Web Services Region.

        " }, "NetworkType":{ "shape":"String", @@ -7073,7 +7104,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for the DB instance.

        For more information, see CreateDBInstance.

        " + "documentation":"

        The lifecycle type for the DB instance.

        For more information, see CreateDBInstance.

        " } }, "documentation":"

        Contains the details of an Amazon RDS DB instance.

        This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.

        ", @@ -7199,7 +7230,7 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

        The location where automated backups are stored: Amazon Web Services Outposts or the Amazon Web Services Region.

        " + "documentation":"

        The location where automated backups are stored: Dedicated Local Zones, Amazon Web Services Outposts, or the Amazon Web Services Region.

        " }, "StorageThroughput":{ "shape":"IntegerOptional", @@ -7421,6 +7452,31 @@ }, "exception":true }, + "DBMajorEngineVersion":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"String", + "documentation":"

        The name of the database engine.

        " + }, + "MajorEngineVersion":{ + "shape":"String", + "documentation":"

        The major version number of the database engine.

        " + }, + "SupportedEngineLifecycles":{ + "shape":"SupportedEngineLifecycleList", + "documentation":"

        A list of the lifecycles supported by this engine for the DescribeDBMajorEngineVersions operation.

        " + } + }, + "documentation":"

        This data type is used as a response element in the operation DescribeDBMajorEngineVersions.

        " + }, + "DBMajorEngineVersionsList":{ + "type":"list", + "member":{ + "shape":"DBMajorEngineVersion", + "locationName":"DBMajorEngineVersion" + } + }, "DBParameterGroup":{ "type":"structure", "members":{ @@ -8331,7 +8387,7 @@ }, "SnapshotTarget":{ "shape":"String", - "documentation":"

        Specifies where manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region.

        " + "documentation":"

        Specifies where manual snapshots are stored: Dedicated Local Zones, Amazon Web Services Outposts, or the Amazon Web Services Region.

        " }, "StorageThroughput":{ "shape":"IntegerOptional", @@ -8348,6 +8404,10 @@ "MultiTenant":{ "shape":"BooleanOptional", "documentation":"

        Indicates whether the snapshot is of a DB instance using the multi-tenant configuration (TRUE) or the single-tenant configuration (FALSE).

        " + }, + "SnapshotAvailabilityZone":{ + "shape":"String", + "documentation":"

        Specifies the name of the Availability Zone where RDS stores the DB snapshot. This value is valid only for snapshots that RDS stores on a Dedicated Local Zone.

        " } }, "documentation":"

        Contains the details of an Amazon RDS DB snapshot.

        This data type is used as a response element in the DescribeDBSnapshots action.

        ", @@ -8541,7 +8601,7 @@ }, "Subnets":{ "shape":"SubnetList", - "documentation":"

        Contains a list of Subnet elements.

        " + "documentation":"

        Contains a list of Subnet elements. The list of subnets shown here might not reflect the current state of your VPC. For the most up-to-date information, we recommend checking your VPC configuration directly.

        " }, "DBSubnetGroupArn":{ "shape":"String", @@ -9493,6 +9553,40 @@ }, "documentation":"

        The response from a call to DescribeDBLogFiles.

        " }, + "DescribeDBMajorEngineVersionsRequest":{ + "type":"structure", + "members":{ + "Engine":{ + "shape":"Engine", + "documentation":"

        The database engine to return major version details for.

        Valid Values:

        • aurora-mysql

        • aurora-postgresql

        • custom-sqlserver-ee

        • custom-sqlserver-se

        • custom-sqlserver-web

        • db2-ae

        • db2-se

        • mariadb

        • mysql

        • oracle-ee

        • oracle-ee-cdb

        • oracle-se2

        • oracle-se2-cdb

        • postgres

        • sqlserver-ee

        • sqlserver-se

        • sqlserver-ex

        • sqlserver-web

        " + }, + "MajorEngineVersion":{ + "shape":"MajorEngineVersion", + "documentation":"

        A specific database major engine version to return details for.

        Example: 8.4

        " + }, + "Marker":{ + "shape":"Marker", + "documentation":"

        An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

        The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.

        Default: 100

        " + } + } + }, + "DescribeDBMajorEngineVersionsResponse":{ + "type":"structure", + "members":{ + "DBMajorEngineVersions":{ + "shape":"DBMajorEngineVersionsList", + "documentation":"

        A list of DBMajorEngineVersion elements.

        " + }, + "Marker":{ + "shape":"String", + "documentation":"

        An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

        " + } + } + }, "DescribeDBParameterGroupsMessage":{ "type":"structure", "members":{ @@ -10640,6 +10734,11 @@ }, "documentation":"

        This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:

        • CreateDBInstance

        • DescribeDBInstances

        • DeleteDBInstance

        For the data structure that represents Amazon Aurora DB cluster endpoints, see DBClusterEndpoint.

        " }, + "Engine":{ + "type":"string", + "max":50, + "min":1 + }, "EngineDefaults":{ "type":"structure", "members":{ @@ -11107,7 +11206,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for the global cluster.

        For more information, see CreateGlobalCluster.

        " + "documentation":"

        The lifecycle type for the global cluster.

        For more information, see CreateGlobalCluster.

        " }, "DatabaseName":{ "shape":"String", @@ -11867,6 +11966,13 @@ "min":1, "pattern":"[a-zA-Z0-9_:\\-\\/]+" }, + "LifecycleSupportName":{ + "type":"string", + "enum":[ + "open-source-rds-standard-support", + "open-source-rds-extended-support" + ] + }, "LimitlessDatabase":{ "type":"structure", "members":{ @@ -11925,6 +12031,11 @@ }, "Long":{"type":"long"}, "LongOptional":{"type":"long"}, + "MajorEngineVersion":{ + "type":"string", + "max":50, + "min":1 + }, "Marker":{ "type":"string", "max":340, @@ -12611,7 +12722,7 @@ }, "ReplicaMode":{ "shape":"ReplicaMode", - "documentation":"

        A value that sets the open mode of a replica database to either mounted or read-only.

        Currently, this parameter is only supported for Oracle DB instances.

        Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload. For more information, see Working with Oracle Read Replicas for Amazon RDS in the Amazon RDS User Guide.

        This setting doesn't apply to RDS Custom DB instances.

        " + "documentation":"

        The open mode of a replica database.

        This parameter is only supported for Db2 DB instances and Oracle DB instances.

        Db2

        Standby DB replicas are included in Db2 Advanced Edition (AE) and Db2 Standard Edition (SE). The main use case for standby replicas is cross-Region disaster recovery. Because it doesn't accept user connections, a standby replica can't serve a read-only workload.

        You can create a combination of standby and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Db2 in the Amazon RDS User Guide.

        To create standby DB replicas for RDS for Db2, set this parameter to mounted.

        Oracle

        Mounted DB replicas are included in Oracle Database Enterprise Edition. The main use case for mounted replicas is cross-Region disaster recovery. The primary database doesn't use Active Data Guard to transmit information to the mounted replica. Because it doesn't accept user connections, a mounted replica can't serve a read-only workload.

        You can create a combination of mounted and read-only DB replicas for the same primary DB instance. For more information, see Working with read replicas for Amazon RDS for Oracle in the Amazon RDS User Guide.

        For RDS Custom, you must specify this parameter and set it to mounted. The value won't be set by default. After replica creation, you can manage the open mode manually.

        " }, "EnableCustomerOwnedIp":{ "shape":"BooleanOptional", @@ -14901,7 +15012,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } } }, @@ -15054,7 +15165,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } }, "documentation":"

        " @@ -15201,7 +15312,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB cluster.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

        Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } }, "documentation":"

        " @@ -15351,7 +15462,7 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

        Specifies where automated backups and manual snapshots are stored for the restored DB instance.

        Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region.

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " + "documentation":"

        Specifies where automated backups and manual snapshots are stored for the restored DB instance.

        Possible values are local (Dedicated Local Zone), outposts (Amazon Web Services Outposts), and region (Amazon Web Services Region). The default is region.

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " }, "NetworkType":{ "shape":"String", @@ -15379,7 +15490,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", @@ -15616,7 +15727,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " } } }, @@ -15785,7 +15896,7 @@ }, "BackupTarget":{ "shape":"String", - "documentation":"

        The location for storing automated backups and manual snapshots for the restored DB instance.

        Valid Values:

        • outposts (Amazon Web Services Outposts)

        • region (Amazon Web Services Region)

        Default: region

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " + "documentation":"

        The location for storing automated backups and manual snapshots for the restored DB instance.

        Valid Values:

        • local (Dedicated Local Zone)

        • outposts (Amazon Web Services Outposts)

        • region (Amazon Web Services Region)

        Default: region

        For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide.

        " }, "NetworkType":{ "shape":"String", @@ -15809,7 +15920,7 @@ }, "EngineLifecycleSupport":{ "shape":"String", - "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " + "documentation":"

        The life cycle type for this DB instance.

        By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

        You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

        This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

        Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

        Default: open-source-rds-extended-support

        " }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", @@ -16304,7 +16415,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"

        The ID of the Amazon Web Services KMS key to use to encrypt the data exported to Amazon S3. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. The caller of this operation must be authorized to run the following operations. These can be set in the Amazon Web Services KMS key policy:

        • kms:Encrypt

        • kms:Decrypt

        • kms:GenerateDataKey

        • kms:GenerateDataKeyWithoutPlaintext

        • kms:ReEncryptFrom

        • kms:ReEncryptTo

        • kms:CreateGrant

        • kms:DescribeKey

        • kms:RetireGrant

        " + "documentation":"

        The ID of the Amazon Web Services KMS key to use to encrypt the data exported to Amazon S3. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. The caller of this operation must be authorized to run the following operations. These can be set in the Amazon Web Services KMS key policy:

        • kms:CreateGrant

        • kms:DescribeKey

        " }, "S3Prefix":{ "shape":"String", @@ -16527,6 +16638,36 @@ "locationName":"CharacterSet" } }, + "SupportedEngineLifecycle":{ + "type":"structure", + "required":[ + "LifecycleSupportName", + "LifecycleSupportStartDate", + "LifecycleSupportEndDate" + ], + "members":{ + "LifecycleSupportName":{ + "shape":"LifecycleSupportName", + "documentation":"

        The type of lifecycle support that the engine version is in.

        This parameter returns the following values:

        • open-source-rds-standard-support - Indicates RDS standard support or Aurora standard support.

        • open-source-rds-extended-support - Indicates Amazon RDS Extended Support.

        For Amazon RDS for MySQL, Amazon RDS for PostgreSQL, Aurora MySQL, and Aurora PostgreSQL, this parameter returns both open-source-rds-standard-support and open-source-rds-extended-support.

        For Amazon RDS for MariaDB, this parameter only returns the value open-source-rds-standard-support.

        For information about Amazon RDS Extended Support, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide and Amazon RDS Extended Support with Amazon Aurora in the Amazon Aurora User Guide.

        " + }, + "LifecycleSupportStartDate":{ + "shape":"TStamp", + "documentation":"

        The start date for the type of support returned by LifecycleSupportName.

        " + }, + "LifecycleSupportEndDate":{ + "shape":"TStamp", + "documentation":"

        The end date for the type of support returned by LifecycleSupportName.

        " + } + }, + "documentation":"

        This data type is used as a response element in the operation DescribeDBMajorEngineVersions.

        You can use the information that this data type returns to plan for upgrades.

        This data type only returns information for the open source engines Amazon RDS for MariaDB, Amazon RDS for MySQL, Amazon RDS for PostgreSQL, Aurora MySQL, and Aurora PostgreSQL.

        " + }, + "SupportedEngineLifecycleList":{ + "type":"list", + "member":{ + "shape":"SupportedEngineLifecycle", + "locationName":"SupportedEngineLifecycle" + } + }, "SupportedTimezonesList":{ "type":"list", "member":{ diff --git a/services/rdsdata/pom.xml b/services/rdsdata/pom.xml index 94589f23c866..e1b58e24a112 100644 --- a/services/rdsdata/pom.xml +++ b/services/rdsdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rdsdata AWS Java SDK :: Services :: RDS Data diff --git a/services/rdsdata/src/main/resources/codegen-resources/customization.config b/services/rdsdata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/rdsdata/src/main/resources/codegen-resources/customization.config +++ b/services/rdsdata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/redshift/pom.xml b/services/redshift/pom.xml index f887c785fb0c..5bfbc691680e 100644 --- a/services/redshift/pom.xml +++ b/services/redshift/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT redshift AWS Java SDK :: Services :: Amazon Redshift diff --git a/services/redshiftdata/pom.xml b/services/redshiftdata/pom.xml index 60d7d12c05a3..b852f344593c 100644 --- a/services/redshiftdata/pom.xml +++ b/services/redshiftdata/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT redshiftdata AWS Java SDK :: Services :: Redshift Data diff --git a/services/redshiftdata/src/main/resources/codegen-resources/customization.config b/services/redshiftdata/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/redshiftdata/src/main/resources/codegen-resources/customization.config +++ 
b/services/redshiftdata/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/redshiftserverless/pom.xml b/services/redshiftserverless/pom.xml index f3a1d2475598..2143d8577b13 100644 --- a/services/redshiftserverless/pom.xml +++ b/services/redshiftserverless/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT redshiftserverless AWS Java SDK :: Services :: Redshift Serverless diff --git a/services/redshiftserverless/src/main/resources/codegen-resources/customization.config b/services/redshiftserverless/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/redshiftserverless/src/main/resources/codegen-resources/customization.config +++ b/services/redshiftserverless/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/rekognition/pom.xml b/services/rekognition/pom.xml index 455284915682..0b35651c736e 100644 --- a/services/rekognition/pom.xml +++ b/services/rekognition/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rekognition AWS Java SDK :: Services :: Amazon Rekognition diff --git a/services/rekognition/src/main/resources/codegen-resources/customization.config b/services/rekognition/src/main/resources/codegen-resources/customization.config index 63911995485c..284e705d144f 100644 --- a/services/rekognition/src/main/resources/codegen-resources/customization.config +++ b/services/rekognition/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "describeTableRestoreStatus", "describeClusterSecurityGroups" ], - "enableGenerateCompiledEndpointRules": true, - 
"enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/rekognition/src/main/resources/codegen-resources/service-2.json b/services/rekognition/src/main/resources/codegen-resources/service-2.json index 45455d20eca9..220ab5b61595 100644 --- a/services/rekognition/src/main/resources/codegen-resources/service-2.json +++ b/services/rekognition/src/main/resources/codegen-resources/service-2.json @@ -33,7 +33,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

        Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that are present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100.

        The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

        If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

        The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

        • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

        • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

        • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

        " + "documentation":"

        Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that is present in the FaceIds list is associated with the provided UserID. The number of FaceIds that can be used as input in a single request is limited to 100.

        Note that the total number of faces that can be associated with a single UserID is also limited to 100. Once a UserID has 100 faces associated with it, no additional faces can be added. If more API calls are made after the limit is reached, a ServiceQuotaExceededException will result.

        The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75.

        If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations.

        The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be:

        • ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete.

        • CREATED - A UserID has been created, but has no FaceID(s) associated with it.

        • UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.

        " }, "CompareFaces":{ "name":"CompareFaces", @@ -479,7 +479,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

        This operation applies only to Amazon Rekognition Custom Labels.

        Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

        You specify which version of a model version to use by using the ProjectVersionArn input parameter.

        You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

        For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). Note that for the DetectCustomLabelsLabels operation, Polygons are not returned in the Geometry section of the response.

        To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

        This is a stateless API operation. That is, the operation does not persist any data.

        This operation requires permissions to perform the rekognition:DetectCustomLabels action.

        For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        " + "documentation":"

        This operation applies only to Amazon Rekognition Custom Labels.

        Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

        You specify which version of a model version to use by using the ProjectVersionArn input parameter.

        You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

        For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry).

        To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

        This is a stateless API operation. That is, the operation does not persist any data.

        This operation requires permissions to perform the rekognition:DetectCustomLabels action.

        For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

        " }, "DetectFaces":{ "name":"DetectFaces", @@ -789,7 +789,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Gets the path tracking results of an Amazon Rekognition Video analysis started by StartPersonTracking.

        The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

        To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

        GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

        For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

        By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

        Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

        " + "documentation":"

        End of support notice: On October 31, 2025, AWS will discontinue support for Amazon Rekognition People Pathing. After October 31, 2025, you will no longer be able to use the Rekognition People Pathing capability. For more information, visit this blog post.

        Gets the path tracking results of an Amazon Rekognition Video analysis started by StartPersonTracking.

        The person path tracking operation is started by a call to StartPersonTracking which returns a job identifier (JobId). When the operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartPersonTracking.

        To get the results of the person path tracking operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        GetPersonTracking returns an array, Persons, of tracked persons and the time(s) their paths were tracked in the video.

        GetPersonTracking only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned.

        For more information, see FaceDetail in the Amazon Rekognition Developer Guide.

        By default, the array is sorted by the time(s) a person's path is tracked in the video. You can sort by tracked persons by specifying INDEX for the SortBy input parameter.

        Use the MaxResults parameter to limit the number of items returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetPersonTracking and populate the NextToken request parameter with the token value returned from the previous call to GetPersonTracking.

        " }, "GetSegmentDetection":{ "name":"GetSegmentDetection", @@ -1299,7 +1299,7 @@ {"shape":"LimitExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

        Starts the asynchronous tracking of a person's path in a stored video.

        Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

        To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        ", + "documentation":"

        End of support notice: On October 31, 2025, AWS will discontinue support for Amazon Rekognition People Pathing. After October 31, 2025, you will no longer be able to use the Rekognition People Pathing capability. For more information, visit this blog post.

        Starts the asynchronous tracking of a person's path in a stored video.

        Amazon Rekognition Video can track the path of people in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartPersonTracking returns a job identifier (JobId) which you use to get the results of the operation. When label detection is finished, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel.

        To get the results of the person detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetPersonTracking and pass the job identifier (JobId) from the initial call to StartPersonTracking.

        ", "idempotent":true }, "StartProjectVersion":{ @@ -1503,8 +1503,7 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You are not authorized to perform the action.

        ", "exception":true }, @@ -1831,6 +1830,50 @@ "type":"list", "member":{"shape":"CelebrityRecognition"} }, + "Challenge":{ + "type":"structure", + "required":[ + "Type", + "Version" + ], + "members":{ + "Type":{ + "shape":"ChallengeType", + "documentation":"

        The type of the challenge being used for the Face Liveness session.

        " + }, + "Version":{ + "shape":"Version", + "documentation":"

        The version of the challenge being used for the Face Liveness session.

        " + } + }, + "documentation":"

        Describes the type and version of the challenge being used for the Face Liveness session.

        " + }, + "ChallengePreference":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"ChallengeType", + "documentation":"

        The types of challenges that have been selected for the Face Liveness session.

        " + }, + "Versions":{ + "shape":"Versions", + "documentation":"

        The version of the challenges that have been selected for the Face Liveness session.

        " + } + }, + "documentation":"

        An ordered list of preferred challenge type and versions.

        " + }, + "ChallengePreferences":{ + "type":"list", + "member":{"shape":"ChallengePreference"} + }, + "ChallengeType":{ + "type":"string", + "enum":[ + "FaceMovementAndLightChallenge", + "FaceMovementChallenge" + ] + }, "ClientRequestToken":{ "type":"string", "max":64, @@ -1973,8 +2016,7 @@ }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A User with the same Id already exists within the collection, or the update or deletion of the User caused an inconsistent state. **

        ", "exception":true }, @@ -2249,6 +2291,10 @@ "AuditImagesLimit":{ "shape":"AuditImagesLimit", "documentation":"

        Number of audit images to be returned back. Takes an integer between 0-4. Any integer less than 0 will return 0, any integer above 4 will return 4 images in the response. By default, it is set to 0. The limit is best effort and is based on the actual duration of the selfie-video.

        " + }, + "ChallengePreferences":{ + "shape":"ChallengePreferences", + "documentation":"

        Indicates preferred challenge types and versions for the Face Liveness session to be created.

        " } }, "documentation":"

        A session settings object. It contains settings for the operation to be performed. It accepts arguments for OutputConfig and AuditImagesLimit.

        " @@ -2431,8 +2477,7 @@ }, "CreateUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CustomLabel":{ "type":"structure", @@ -2721,8 +2766,7 @@ }, "DeleteDatasetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFacesRequest":{ "type":"structure", @@ -2777,8 +2821,7 @@ }, "DeleteProjectPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteProjectRequest":{ "type":"structure", @@ -2830,8 +2873,7 @@ }, "DeleteStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserRequest":{ "type":"structure", @@ -2857,8 +2899,7 @@ }, "DeleteUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeCollectionRequest":{ "type":"structure", @@ -3483,8 +3524,7 @@ }, "DistributeDatasetEntriesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DistributeDatasetMetadataList":{ "type":"list", @@ -3542,7 +3582,7 @@ "documentation":"

        Level of confidence in the determination.

        " } }, - "documentation":"

        The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.

        " + "documentation":"

        The API returns a prediction of an emotion based on a person's facial expressions, along with the confidence level for the predicted emotion. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally. The API is not intended to be used, and you may not use it, in a manner that violates the EU Artificial Intelligence Act or any other applicable law.

        " }, "EmotionName":{ "type":"string", @@ -4211,6 +4251,10 @@ "AuditImages":{ "shape":"AuditImages", "documentation":"

        A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. If no Amazon S3 bucket is defined, raw bytes are sent instead.

        " + }, + "Challenge":{ + "shape":"Challenge", + "documentation":"

        Contains information regarding the challenge type used for the Face Liveness check.

        " } } }, @@ -4697,8 +4741,7 @@ }, "IdempotentParameterMismatchException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.

        ", "exception":true }, @@ -4741,8 +4784,7 @@ }, "ImageTooLargeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment, the image size or resolution exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.

        ", "exception":true }, @@ -4832,51 +4874,44 @@ }, "InternalServerError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition experienced a service issue. Try your call again.

        ", "exception":true, "fault":true }, "InvalidImageFormatException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The provided image format is not supported.

        ", "exception":true }, "InvalidManifestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Indicates that a provided manifest file is empty or larger than the allowed limit.

        ", "exception":true }, "InvalidPaginationTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Pagination token in the request is not valid.

        ", "exception":true }, "InvalidParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Input parameter violated a constraint. Validate your parameter before calling the API operation again.

        ", "exception":true }, "InvalidPolicyRevisionIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The supplied revision id for the project policy is invalid.

        ", "exception":true }, "InvalidS3ObjectException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition is unable to access the S3 object specified in the request.

        ", "exception":true }, @@ -5149,8 +5184,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs concurrently, subsequent calls to start operations (ex: StartLabelDetection) will raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Rekognition service limit.

        ", "exception":true }, @@ -5503,8 +5537,7 @@ }, "MalformedPolicyDocumentException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The format of the project policy document that you supplied to PutProjectPolicy is incorrect.

        ", "exception":true }, @@ -6319,8 +6352,7 @@ }, "ProvisionedThroughputExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.

        ", "exception":true }, @@ -6444,8 +6476,7 @@ }, "ResourceAlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A resource with the specified ID already exists.

        ", "exception":true }, @@ -6456,22 +6487,19 @@ }, "ResourceInUseException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified resource is already being used.

        ", "exception":true }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource specified in the request cannot be found.

        ", "exception":true }, "ResourceNotReadyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested resource isn't ready. For example, this exception occurs when you call DetectCustomLabels with a model version that isn't deployed.

        ", "exception":true }, @@ -6843,15 +6871,13 @@ }, "ServiceQuotaExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The size of the collection exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.

        ", "exception":true }, "SessionNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Occurs when a given sessionId is not found.

        ", "exception":true }, @@ -7355,8 +7381,7 @@ }, "StopStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StreamProcessingStartSelector":{ "type":"structure", @@ -7555,8 +7580,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -7683,8 +7707,7 @@ }, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Rekognition is temporarily unable to process the request. Try your call again.

        ", "exception":true, "fault":true @@ -7906,8 +7929,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDatasetEntriesRequest":{ "type":"structure", @@ -7928,8 +7950,7 @@ }, "UpdateDatasetEntriesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateStreamProcessorRequest":{ "type":"structure", @@ -7959,8 +7980,7 @@ }, "UpdateStreamProcessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Url":{"type":"string"}, "Urls":{ @@ -8038,6 +8058,12 @@ }, "documentation":"

        Contains the Amazon S3 bucket location of the validation data for a model training job.

        The validation data includes error information for individual JSON Lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.

        You get the ValidationData object for the training dataset (TrainingDataResult) and the test dataset (TestingDataResult) by calling DescribeProjectVersions.

        The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.

        " }, + "Version":{ + "type":"string", + "max":11, + "min":5, + "pattern":"^(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})\\.(0|[1-9]\\d{0,2})$" + }, "VersionDescription":{ "type":"string", "max":255, @@ -8056,6 +8082,20 @@ "max":10, "min":1 }, + "Versions":{ + "type":"structure", + "members":{ + "Minimum":{ + "shape":"Version", + "documentation":"

        The desired minimum version for the challenge.

        " + }, + "Maximum":{ + "shape":"Version", + "documentation":"

        The desired maximum version for the challenge.

        " + } + }, + "documentation":"

        Object specifying the acceptable range of challenge versions.

        " + }, "Video":{ "type":"structure", "members":{ @@ -8121,8 +8161,7 @@ }, "VideoTooLargeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The file size or duration of the supplied media is too large. The maximum file size is 10GB. The maximum duration is 6 hours.

        ", "exception":true } diff --git a/services/repostspace/pom.xml b/services/repostspace/pom.xml index 13b123d777c0..8a742558fe87 100644 --- a/services/repostspace/pom.xml +++ b/services/repostspace/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT repostspace AWS Java SDK :: Services :: Repostspace diff --git a/services/repostspace/src/main/resources/codegen-resources/customization.config b/services/repostspace/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/repostspace/src/main/resources/codegen-resources/customization.config +++ b/services/repostspace/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/resiliencehub/pom.xml b/services/resiliencehub/pom.xml index 9b091f950e80..0d1f4041719f 100644 --- a/services/resiliencehub/pom.xml +++ b/services/resiliencehub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT resiliencehub AWS Java SDK :: Services :: Resiliencehub diff --git a/services/resiliencehub/src/main/resources/codegen-resources/customization.config b/services/resiliencehub/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/resiliencehub/src/main/resources/codegen-resources/customization.config +++ b/services/resiliencehub/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/resourceexplorer2/pom.xml b/services/resourceexplorer2/pom.xml index 4e710388579c..938552b4e29d 100644 --- a/services/resourceexplorer2/pom.xml +++ b/services/resourceexplorer2/pom.xml @@ -21,7 +21,7 @@ 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT resourceexplorer2 AWS Java SDK :: Services :: Resource Explorer 2 diff --git a/services/resourceexplorer2/src/main/resources/codegen-resources/customization.config b/services/resourceexplorer2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/resourceexplorer2/src/main/resources/codegen-resources/customization.config +++ b/services/resourceexplorer2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/resourcegroups/pom.xml b/services/resourcegroups/pom.xml index b03367893e1a..bc2b6955e662 100644 --- a/services/resourcegroups/pom.xml +++ b/services/resourcegroups/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 resourcegroups diff --git a/services/resourcegroups/src/main/resources/codegen-resources/customization.config b/services/resourcegroups/src/main/resources/codegen-resources/customization.config index fedba63f2342..f5f6e2d56258 100644 --- a/services/resourcegroups/src/main/resources/codegen-resources/customization.config +++ b/services/resourcegroups/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listGroups" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/resourcegroupstaggingapi/pom.xml b/services/resourcegroupstaggingapi/pom.xml index a88b303c60c3..0209c3cf394d 100644 --- a/services/resourcegroupstaggingapi/pom.xml +++ b/services/resourcegroupstaggingapi/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT resourcegroupstaggingapi AWS Java SDK :: Services :: AWS Resource Groups Tagging API diff --git 
a/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/customization.config b/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/customization.config index f000aa93a638..c93cb9be664c 100644 --- a/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/customization.config +++ b/services/resourcegroupstaggingapi/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "getResources", "getTagKeys" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/robomaker/pom.xml b/services/robomaker/pom.xml index 38179bfed94c..6bf2b0c60262 100644 --- a/services/robomaker/pom.xml +++ b/services/robomaker/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT robomaker AWS Java SDK :: Services :: RoboMaker diff --git a/services/robomaker/src/main/resources/codegen-resources/customization.config b/services/robomaker/src/main/resources/codegen-resources/customization.config index e6e5728d64bd..4b0b869a06a0 100644 --- a/services/robomaker/src/main/resources/codegen-resources/customization.config +++ b/services/robomaker/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "listSimulationApplications", "listSimulationJobs" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/rolesanywhere/pom.xml b/services/rolesanywhere/pom.xml index 4619934b814b..9764edea6cfd 100644 --- a/services/rolesanywhere/pom.xml +++ b/services/rolesanywhere/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rolesanywhere AWS Java SDK :: Services :: Roles Anywhere diff --git a/services/rolesanywhere/src/main/resources/codegen-resources/customization.config b/services/rolesanywhere/src/main/resources/codegen-resources/customization.config 
index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/rolesanywhere/src/main/resources/codegen-resources/customization.config +++ b/services/rolesanywhere/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53/pom.xml b/services/route53/pom.xml index 71459e049ddf..70478705c218 100644 --- a/services/route53/pom.xml +++ b/services/route53/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53 AWS Java SDK :: Services :: Amazon Route53 diff --git a/services/route53/src/main/resources/codegen-resources/service-2.json b/services/route53/src/main/resources/codegen-resources/service-2.json index 48d67cca03c6..3c6aafe58772 100644 --- a/services/route53/src/main/resources/codegen-resources/service-2.json +++ b/services/route53/src/main/resources/codegen-resources/service-2.json @@ -1523,8 +1523,7 @@ }, "ChangeTagsForResourceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty response for the request.

        " }, "Changes":{ @@ -1800,7 +1799,8 @@ "mx-central-1", "us-isof-south-1", "us-isof-east-1", - "ap-southeast-7" + "ap-southeast-7", + "ap-east-2" ], "max":64, "min":1 @@ -2492,8 +2492,7 @@ }, "DeleteCidrCollectionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteHealthCheckRequest":{ "type":"structure", @@ -2510,8 +2509,7 @@ }, "DeleteHealthCheckResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteHostedZoneRequest":{ @@ -2580,8 +2578,7 @@ }, "DeleteQueryLoggingConfigResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteReusableDelegationSetRequest":{ "type":"structure", @@ -2598,8 +2595,7 @@ }, "DeleteReusableDelegationSetResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteTrafficPolicyInstanceRequest":{ @@ -2617,8 +2613,7 @@ }, "DeleteTrafficPolicyInstanceResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteTrafficPolicyRequest":{ @@ -2645,8 +2640,7 @@ }, "DeleteTrafficPolicyResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An empty element.

        " }, "DeleteVPCAssociationAuthorizationRequest":{ @@ -2671,8 +2665,7 @@ }, "DeleteVPCAssociationAuthorizationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty response for the request.

        " }, "Dimension":{ @@ -2967,8 +2960,7 @@ }, "GetCheckerIpRangesRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Empty request.

        " }, "GetCheckerIpRangesResponse":{ @@ -3048,8 +3040,7 @@ }, "GetHealthCheckCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A request for the number of health checks that are associated with the current Amazon Web Services account.

        " }, "GetHealthCheckCountResponse":{ @@ -3137,8 +3128,7 @@ }, "GetHostedZoneCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A request to retrieve a count of all the hosted zones that are associated with the current Amazon Web Services account.

        " }, "GetHostedZoneCountResponse":{ @@ -3312,8 +3302,7 @@ }, "GetTrafficPolicyInstanceCountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Request to get the number of traffic policy instances that are associated with the current Amazon Web Services account.

        " }, "GetTrafficPolicyInstanceCountResponse":{ @@ -5570,7 +5559,8 @@ "mx-central-1", "ap-southeast-7", "us-gov-east-1", - "us-gov-west-1" + "us-gov-west-1", + "ap-east-2" ], "max":64, "min":1 @@ -6453,7 +6443,8 @@ "mx-central-1", "us-isof-south-1", "us-isof-east-1", - "ap-southeast-7" + "ap-southeast-7", + "ap-east-2" ], "max":64, "min":1 diff --git a/services/route53domains/pom.xml b/services/route53domains/pom.xml index 4a5d54823e44..6cbf592408fe 100644 --- a/services/route53domains/pom.xml +++ b/services/route53domains/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53domains AWS Java SDK :: Services :: Amazon Route53 Domains diff --git a/services/route53domains/src/main/resources/codegen-resources/customization.config b/services/route53domains/src/main/resources/codegen-resources/customization.config index 053c3a606674..f0538c37cbb6 100644 --- a/services/route53domains/src/main/resources/codegen-resources/customization.config +++ b/services/route53domains/src/main/resources/codegen-resources/customization.config @@ -8,6 +8,5 @@ "viewBilling", "getContactReachabilityStatus" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53profiles/pom.xml b/services/route53profiles/pom.xml index fb19335047d1..85b9d12e8f23 100644 --- a/services/route53profiles/pom.xml +++ b/services/route53profiles/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53profiles AWS Java SDK :: Services :: Route53 Profiles diff --git a/services/route53profiles/src/main/resources/codegen-resources/customization.config b/services/route53profiles/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/route53profiles/src/main/resources/codegen-resources/customization.config +++ 
b/services/route53profiles/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/route53recoverycluster/pom.xml b/services/route53recoverycluster/pom.xml index 5fa75cc0f366..7ca65cfeea21 100644 --- a/services/route53recoverycluster/pom.xml +++ b/services/route53recoverycluster/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53recoverycluster AWS Java SDK :: Services :: Route53 Recovery Cluster diff --git a/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config b/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config +++ b/services/route53recoverycluster/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53recoverycontrolconfig/pom.xml b/services/route53recoverycontrolconfig/pom.xml index 44b73e3cfe7b..6c54a562a936 100644 --- a/services/route53recoverycontrolconfig/pom.xml +++ b/services/route53recoverycontrolconfig/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53recoverycontrolconfig AWS Java SDK :: Services :: Route53 Recovery Control Config diff --git a/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/customization.config b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/customization.config +++ b/services/route53recoverycontrolconfig/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { 
- "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53recoveryreadiness/pom.xml b/services/route53recoveryreadiness/pom.xml index 1019fd2a26c1..4500241a3116 100644 --- a/services/route53recoveryreadiness/pom.xml +++ b/services/route53recoveryreadiness/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53recoveryreadiness AWS Java SDK :: Services :: Route53 Recovery Readiness diff --git a/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config b/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config +++ b/services/route53recoveryreadiness/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53resolver/pom.xml b/services/route53resolver/pom.xml index 4a7e6afd2dc2..fad23ac6c084 100644 --- a/services/route53resolver/pom.xml +++ b/services/route53resolver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT route53resolver AWS Java SDK :: Services :: Route53Resolver diff --git a/services/route53resolver/src/main/resources/codegen-resources/customization.config b/services/route53resolver/src/main/resources/codegen-resources/customization.config index 114c01b071e6..f7a32e6808c0 100644 --- a/services/route53resolver/src/main/resources/codegen-resources/customization.config +++ b/services/route53resolver/src/main/resources/codegen-resources/customization.config @@ -9,6 +9,5 @@ "listResolverRuleAssociations", "listResolverRules" ], - "enableGenerateCompiledEndpointRules": true, - 
"enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/route53resolver/src/main/resources/codegen-resources/service-2.json b/services/route53resolver/src/main/resources/codegen-resources/service-2.json index 743d90c73160..794e91115ebd 100644 --- a/services/route53resolver/src/main/resources/codegen-resources/service-2.json +++ b/services/route53resolver/src/main/resources/codegen-resources/service-2.json @@ -1648,7 +1648,7 @@ }, "Direction":{ "shape":"ResolverEndpointDirection", - "documentation":"

        Specify the applicable value:

        • INBOUND: Resolver forwards DNS queries to the DNS service for a VPC from your network

        • OUTBOUND: Resolver forwards DNS queries from the DNS service for a VPC to your network

        " + "documentation":"

        Specify the applicable value:

        • INBOUND: Resolver forwards DNS queries to the DNS service for a VPC from your network.

        • OUTBOUND: Resolver forwards DNS queries from the DNS service for a VPC to your network.

        • INBOUND_DELEGATION: Resolver delegates queries to Route 53 private hosted zones from your network.

        " }, "IpAddresses":{ "shape":"IpAddressesRequest", @@ -1676,7 +1676,7 @@ }, "Protocols":{ "shape":"ProtocolList", - "documentation":"

        The protocols you want to use for the endpoint. DoH-FIPS is applicable for inbound endpoints only.

        For an inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        ", + "documentation":"

        The protocols you want to use for the endpoint. DoH-FIPS is applicable for default inbound endpoints only.

        For a default inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For a delegation inbound endpoint you can use Do53 only.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        ", "box":true } } @@ -1744,7 +1744,7 @@ }, "RuleType":{ "shape":"RuleTypeOption", - "documentation":"

        When you want to forward DNS queries for specified domain name to resolvers on your network, specify FORWARD.

        When you have a forwarding rule to forward DNS queries for a domain to your network and you want Resolver to process queries for a subdomain of that domain, specify SYSTEM.

        For example, to forward DNS queries for example.com to resolvers on your network, you create a rule and specify FORWARD for RuleType. To then have Resolver process queries for apex.example.com, you create a rule and specify SYSTEM for RuleType.

        Currently, only Resolver can create rules that have a value of RECURSIVE for RuleType.

        " + "documentation":"

        When you want to forward DNS queries for specified domain name to resolvers on your network, specify FORWARD or DELEGATE.

        When you have a forwarding rule to forward DNS queries for a domain to your network and you want Resolver to process queries for a subdomain of that domain, specify SYSTEM.

        For example, to forward DNS queries for example.com to resolvers on your network, you create a rule and specify FORWARD for RuleType. To then have Resolver process queries for apex.example.com, you create a rule and specify SYSTEM for RuleType.

        Currently, only Resolver can create rules that have a value of RECURSIVE for RuleType.

        " }, "DomainName":{ "shape":"DomainName", @@ -1765,6 +1765,11 @@ "shape":"TagList", "documentation":"

        A list of the tag keys and values that you want to associate with the endpoint.

        ", "box":true + }, + "DelegationRecord":{ + "shape":"DelegationRecord", + "documentation":"

        DNS queries with the delegation records that match this domain name are forwarded to the resolvers on your network.

        ", + "box":true } } }, @@ -1782,6 +1787,11 @@ "max":255, "min":1 }, + "DelegationRecord":{ + "type":"string", + "max":256, + "min":1 + }, "DeleteFirewallDomainListRequest":{ "type":"structure", "required":["FirewallDomainListId"], @@ -2934,7 +2944,8 @@ "DELETING", "DELETE_FAILED_FAS_EXPIRED", "UPDATING", - "UPDATE_FAILED" + "UPDATE_FAILED", + "ISOLATED" ] }, "IpAddressUpdate":{ @@ -3812,7 +3823,7 @@ }, "ResourceId":{ "shape":"ResourceId", - "documentation":"

        The ID of the Amazon Virtual Private Cloud VPC that you're configuring Resolver for.

        " + "documentation":"

        The ID of the Amazon Virtual Private Cloud VPC or a Route 53 Profile that you're configuring Resolver for.

        " }, "OwnerId":{ "shape":"AccountId", @@ -3891,7 +3902,7 @@ }, "Direction":{ "shape":"ResolverEndpointDirection", - "documentation":"

        Indicates whether the Resolver endpoint allows inbound or outbound DNS queries:

        • INBOUND: allows DNS queries to your VPC from your network

        • OUTBOUND: allows DNS queries from your VPC to your network

        " + "documentation":"

        Indicates whether the Resolver endpoint allows inbound or outbound DNS queries:

        • INBOUND: allows DNS queries to your VPC from your network

        • OUTBOUND: allows DNS queries from your VPC to your network

        • INBOUND_DELEGATION: Resolver delegates queries to Route 53 private hosted zones from your network.

        " }, "IpAddressCount":{ "shape":"IpAddressCount", @@ -3931,7 +3942,7 @@ }, "Protocols":{ "shape":"ProtocolList", - "documentation":"

        Protocols used for the endpoint. DoH-FIPS is applicable for inbound endpoints only.

        For an inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        " + "documentation":"

        Protocols used for the endpoint. DoH-FIPS is applicable for a default inbound endpoints only.

        For an inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For a delegation inbound endpoint you can use Do53 only.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        " } }, "documentation":"

        In the response to a CreateResolverEndpoint, DeleteResolverEndpoint, GetResolverEndpoint, Updates the name, or ResolverEndpointType for an endpoint, or UpdateResolverEndpoint request, a complex type that contains settings for an existing inbound or outbound Resolver endpoint.

        " @@ -3940,7 +3951,8 @@ "type":"string", "enum":[ "INBOUND", - "OUTBOUND" + "OUTBOUND", + "INBOUND_DELEGATION" ] }, "ResolverEndpointStatus":{ @@ -4122,7 +4134,7 @@ }, "RuleType":{ "shape":"RuleTypeOption", - "documentation":"

        When you want to forward DNS queries for specified domain name to resolvers on your network, specify FORWARD.

        When you have a forwarding rule to forward DNS queries for a domain to your network and you want Resolver to process queries for a subdomain of that domain, specify SYSTEM.

        For example, to forward DNS queries for example.com to resolvers on your network, you create a rule and specify FORWARD for RuleType. To then have Resolver process queries for apex.example.com, you create a rule and specify SYSTEM for RuleType.

        Currently, only Resolver can create rules that have a value of RECURSIVE for RuleType.

        " + "documentation":"

        When you want to forward DNS queries for specified domain name to resolvers on your network, specify FORWARD or DELEGATE. If a query matches multiple Resolver rules (example.com and www.example.com), outbound DNS queries are routed using the Resolver rule that contains the most specific domain name (www.example.com).

        When you have a forwarding rule to forward DNS queries for a domain to your network and you want Resolver to process queries for a subdomain of that domain, specify SYSTEM.

        For example, to forward DNS queries for example.com to resolvers on your network, you create a rule and specify FORWARD for RuleType. To then have Resolver process queries for apex.example.com, you create a rule and specify SYSTEM for RuleType.

        Currently, only Resolver can create rules that have a value of RECURSIVE for RuleType.

        " }, "Name":{ "shape":"Name", @@ -4151,6 +4163,10 @@ "ModificationTime":{ "shape":"Rfc3339TimeString", "documentation":"

        The date and time that the Resolver rule was last updated, in Unix time format and Coordinated Universal Time (UTC).

        " + }, + "DelegationRecord":{ + "shape":"DelegationRecord", + "documentation":"

        DNS queries with delegation records that point to this domain name are forwarded to resolvers on your network.

        " } }, "documentation":"

        For queries that originate in your VPC, detailed information about a Resolver rule, which specifies how to route DNS queries out of the VPC. The ResolverRule parameter appears in the response to a CreateResolverRule, DeleteResolverRule, GetResolverRule, ListResolverRules, or UpdateResolverRule request.

        " @@ -4297,7 +4313,8 @@ "enum":[ "FORWARD", "SYSTEM", - "RECURSIVE" + "RECURSIVE", + "DELEGATE" ] }, "SecurityGroupIds":{ @@ -4404,8 +4421,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4484,8 +4500,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateFirewallConfigRequest":{ "type":"structure", @@ -4740,7 +4755,7 @@ "members":{ "ResourceId":{ "shape":"ResourceId", - "documentation":"

        Resource ID of the Amazon VPC that you want to update the Resolver configuration for.

        " + "documentation":"

        The ID of the Amazon Virtual Private Cloud VPC or a Route 53 Profile that you're configuring Resolver for.

        " }, "AutodefinedReverseFlag":{ "shape":"AutodefinedReverseFlag", @@ -4808,7 +4823,7 @@ }, "Protocols":{ "shape":"ProtocolList", - "documentation":"

        The protocols you want to use for the endpoint. DoH-FIPS is applicable for inbound endpoints only.

        For an inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        You can't change the protocol of an inbound endpoint directly from only Do53 to only DoH, or DoH-FIPS. This is to prevent a sudden disruption to incoming traffic that relies on Do53. To change the protocol from Do53 to DoH, or DoH-FIPS, you must first enable both Do53 and DoH, or Do53 and DoH-FIPS, to make sure that all incoming traffic has transferred to using the DoH protocol, or DoH-FIPS, and then remove the Do53.

        ", + "documentation":"

        The protocols you want to use for the endpoint. DoH-FIPS is applicable for default inbound endpoints only.

        For a default inbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 and DoH-FIPS in combination.

        • Do53 alone.

        • DoH alone.

        • DoH-FIPS alone.

        • None, which is treated as Do53.

        For a delegation inbound endpoint you can use Do53 only.

        For an outbound endpoint you can apply the protocols as follows:

        • Do53 and DoH in combination.

        • Do53 alone.

        • DoH alone.

        • None, which is treated as Do53.

        You can't change the protocol of an inbound endpoint directly from only Do53 to only DoH, or DoH-FIPS. This is to prevent a sudden disruption to incoming traffic that relies on Do53. To change the protocol from Do53 to DoH, or DoH-FIPS, you must first enable both Do53 and DoH, or Do53 and DoH-FIPS, to make sure that all incoming traffic has transferred to using the DoH protocol, or DoH-FIPS, and then remove the Do53.

        ", "box":true } } diff --git a/services/rum/pom.xml b/services/rum/pom.xml index 1d54932f36f0..452e1af15bd8 100644 --- a/services/rum/pom.xml +++ b/services/rum/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT rum AWS Java SDK :: Services :: RUM diff --git a/services/rum/src/main/resources/codegen-resources/customization.config b/services/rum/src/main/resources/codegen-resources/customization.config index 2880fc39d3a3..cdf857bdc287 100644 --- a/services/rum/src/main/resources/codegen-resources/customization.config +++ b/services/rum/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/s3/pom.xml b/services/s3/pom.xml index fb1417f8dc70..4ad75975f8f6 100644 --- a/services/s3/pom.xml +++ b/services/s3/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT s3 AWS Java SDK :: Services :: Amazon S3 @@ -110,6 +110,12 @@ checksums-spi ${awsjavasdk.version} + + software.amazon.awssdk + url-connection-client + ${awsjavasdk.version} + test + software.amazon.awssdk.crt aws-crt diff --git a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java index 28abb2b2314d..7e6ec50e3f8a 100644 --- a/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/S3PresignerIntegrationTest.java @@ -52,6 +52,8 @@ import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import 
software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; import software.amazon.awssdk.services.s3.utils.S3TestUtils; @@ -363,6 +365,50 @@ public void abortMultipartUpload_CanBePresigned() throws IOException { assertThat(getMultipartUpload(objectKey)).isNotPresent(); } + @Test + public void headObject_CanBePresigned() throws IOException { + PresignedHeadObjectRequest presigned = + presigner.presignHeadObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .headObjectRequest(hor -> hor.bucket(testBucket) + .key(testGetObjectKey))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or UrlConnectionHttpClient.builder().build() + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.httpResponse().isSuccessful()).isTrue(); + assertThat(response.httpResponse().firstMatchingHeader("Content-Length")).isPresent(); + assertThat(response.httpResponse().firstMatchingHeader("ETag")).isPresent(); + assertThat(response.httpResponse().firstMatchingHeader("Last-Modified")).isPresent(); + + } + + @Test + public void headBucket_CanBePresigned() throws IOException { + PresignedHeadBucketRequest presigned = + presigner.presignHeadBucket(r -> r.signatureDuration(Duration.ofMinutes(5)) + .headBucketRequest(hbr -> hbr.bucket(testBucket))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + + SdkHttpClient httpClient = ApacheHttpClient.builder().build(); // or 
UrlConnectionHttpClient.builder().build() + + HttpExecuteRequest request = HttpExecuteRequest.builder() + .request(presigned.httpRequest()) + .build(); + + HttpExecuteResponse response = httpClient.prepareRequest(request).call(); + + assertThat(response.httpResponse().isSuccessful()).isTrue(); + assertThat(response.httpResponse().firstMatchingHeader("x-amz-bucket-region")).isPresent(); + } + private Consumer createMultipartUploadRequest(String objectKey) { return r -> r.bucket(testBucket).key(objectKey); } diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java index 4a33f114aca0..7de782e87bf6 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/EmptyFileS3IntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/EmptyFileS3IntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.testutils.service.S3BucketUtils.temporaryBucketName; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java index 678231eb05bf..fa1720860525 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/HeadObjectIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/HeadObjectIntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java similarity index 98% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java index c0ed9d162971..41e73f72edb7 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/S3WithUrlHttpClientIntegrationTest.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/S3WithUrlHttpClientIntegrationTest.java @@ -13,7 +13,7 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.testutils.service.AwsTestBase.CREDENTIALS_PROVIDER_CHAIN; @@ -34,6 +34,7 @@ import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.http.SdkHttpHeaders; import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3ClientBuilder; diff --git a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java similarity index 97% rename from http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java rename to services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java index d184e8b5da4e..497277075ad5 100644 --- a/http-clients/url-connection-client/src/it/java/software/amazon/awssdk/http/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java +++ b/services/s3/src/it/java/software/amazon/awssdk/services/s3/urlconnection/UrlHttpConnectionS3IntegrationTestBase.java @@ -13,11 +13,12 @@ * permissions and limitations under the License. 
*/ -package software.amazon.awssdk.http.urlconnection; +package software.amazon.awssdk.services.s3.urlconnection; import java.util.Iterator; import java.util.List; import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.http.urlconnection.UrlConnectionHttpClient; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3ClientBuilder; diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java index 1bbb2e3917cf..48e09c49a6ba 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/internal/signing/DefaultS3Presigner.java @@ -97,6 +97,8 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.presigner.S3Presigner; @@ -105,11 +107,15 @@ import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.HeadBucketPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.HeadObjectPresignRequest; import 
software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest; @@ -120,6 +126,8 @@ import software.amazon.awssdk.services.s3.transform.CreateMultipartUploadRequestMarshaller; import software.amazon.awssdk.services.s3.transform.DeleteObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.GetObjectRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.HeadBucketRequestMarshaller; +import software.amazon.awssdk.services.s3.transform.HeadObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.PutObjectRequestMarshaller; import software.amazon.awssdk.services.s3.transform.UploadPartRequestMarshaller; import software.amazon.awssdk.utils.AttributeMap; @@ -141,6 +149,8 @@ public final class DefaultS3Presigner extends DefaultSdkPresigner implements S3P private final S3Configuration serviceConfiguration; private final List clientInterceptors; private final GetObjectRequestMarshaller getObjectRequestMarshaller; + private final HeadObjectRequestMarshaller headObjectRequestMarshaller; + private final HeadBucketRequestMarshaller headBucketRequestMarshaller; private final PutObjectRequestMarshaller 
putObjectRequestMarshaller; private final CreateMultipartUploadRequestMarshaller createMultipartUploadRequestMarshaller; private final UploadPartRequestMarshaller uploadPartRequestMarshaller; @@ -193,6 +203,10 @@ private DefaultS3Presigner(Builder b) { // Copied from DefaultS3Client#getObject this.getObjectRequestMarshaller = new GetObjectRequestMarshaller(protocolFactory); + this.headObjectRequestMarshaller = new HeadObjectRequestMarshaller(protocolFactory); + + this.headBucketRequestMarshaller = new HeadBucketRequestMarshaller(protocolFactory); + // Copied from DefaultS3Client#putObject this.putObjectRequestMarshaller = new PutObjectRequestMarshaller(protocolFactory); @@ -273,6 +287,28 @@ public PresignedGetObjectRequest presignGetObject(GetObjectPresignRequest reques .build(); } + @Override + public PresignedHeadObjectRequest presignHeadObject(HeadObjectPresignRequest request) { + return presign(PresignedHeadObjectRequest.builder(), + request, + request.headObjectRequest(), + HeadObjectRequest.class, + headObjectRequestMarshaller::marshall, + "HeadObject") + .build(); + } + + @Override + public PresignedHeadBucketRequest presignHeadBucket(HeadBucketPresignRequest request) { + return presign(PresignedHeadBucketRequest.builder(), + request, + request.headBucketRequest(), + HeadBucketRequest.class, + headBucketRequestMarshaller::marshall, + "HeadBucket") + .build(); + } + @Override public PresignedPutObjectRequest presignPutObject(PutObjectPresignRequest request) { return presign(PresignedPutObjectRequest.builder(), diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java index 7bbed9488dfe..e23516178e92 100644 --- a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/S3Presigner.java @@ -40,6 +40,8 @@ import 
software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.presigner.model.AbortMultipartUploadPresignRequest; @@ -47,11 +49,15 @@ import software.amazon.awssdk.services.s3.presigner.model.CreateMultipartUploadPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.DeleteObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.HeadBucketPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.HeadObjectPresignRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedAbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedCreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedUploadPartRequest; import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest; @@ -328,7 +334,7 @@ static Builder builder() { *

        * This is a shorter method of invoking {@link #presignGetObject(GetObjectPresignRequest)} without needing * to call {@code GetObjectPresignRequest.builder()} or {@code .build()}. - * + * * @see #presignGetObject(GetObjectPresignRequest) */ default PresignedGetObjectRequest presignGetObject(Consumer request) { @@ -337,6 +343,128 @@ default PresignedGetObjectRequest presignGetObject(Consumer + * + * Example Usage + *

        + * + *

        +     * {@code
        +     *     S3Presigner presigner = ...;
        +     *
        +     *     // Create a HeadObjectRequest to be pre-signed
        +     *     HeadObjectRequest headObjectRequest =
        +     *         HeadObjectRequest.builder()
        +     *                          .bucket("my-bucket")
        +     *                          .key("my-key")
        +     *                          .build();
        +     *
        +     *     // Create a HeadObjectPresignRequest to specify the signature duration
        +     *     HeadObjectPresignRequest headObjectPresignRequest =
        +     *         HeadObjectPresignRequest.builder()
        +     *                                .signatureDuration(Duration.ofMinutes(10))
        +     *                                .headObjectRequest(headObjectRequest)
        +     *                                .build();
        +     *
        +     *     // Generate the presigned request
        +     *     PresignedHeadObjectRequest presignedHeadObjectRequest =
        +     *         presigner.presignHeadObject(headObjectPresignRequest);
        +     *
        +     *     // The presigned URL can be used with an HTTP client to retrieve object metadata
        +     *     SdkHttpClient httpClient = ApacheHttpClient.builder().build();
        +     *     HttpExecuteRequest request = HttpExecuteRequest.builder()
        +     *                                                   .request(presignedHeadObjectRequest.httpRequest())
        +     *                                                   .build();
        +     *     HttpExecuteResponse response = httpClient.prepareRequest(request).call();
        +     *
        +     *     // Extract metadata from response headers
        +     *     String contentLength = response.httpResponse().firstMatchingHeader("Content-Length").orElse("0");
        +     * }
        +     * 
        + */ + default PresignedHeadObjectRequest presignHeadObject(HeadObjectPresignRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Presign a {@link HeadObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

        + * This is a shorter method of invoking {@link #presignHeadObject(HeadObjectPresignRequest)} without needing + * to call {@code HeadObjectPresignRequest.builder()} or {@code .build()}. + * + * @see #presignHeadObject(HeadObjectPresignRequest) + */ + default PresignedHeadObjectRequest presignHeadObject(Consumer request) { + HeadObjectPresignRequest.Builder builder = HeadObjectPresignRequest.builder(); + request.accept(builder); + return presignHeadObject(builder.build()); + } + + /** + * Presign a {@link HeadBucketRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

        + * + * Example Usage + *

        + * + *

        +     * {@code
        +     *     S3Presigner presigner = ...;
        +     *
        +     *     // Create a HeadBucketRequest to be pre-signed
        +     *     HeadBucketRequest headBucketRequest =
        +     *         HeadBucketRequest.builder()
        +     *                          .bucket("my-bucket")
        +     *                          .build();
        +     *
        +     *     // Create a HeadBucketPresignRequest to specify the signature duration
        +     *     HeadBucketPresignRequest headBucketPresignRequest =
        +     *         HeadBucketPresignRequest.builder()
        +     *                                .signatureDuration(Duration.ofMinutes(10))
        +     *                                .headBucketRequest(headBucketRequest)
        +     *                                .build();
        +     *
        +     *     // Generate the presigned request
        +     *     PresignedHeadBucketRequest presignedHeadBucketRequest =
        +     *         presigner.presignHeadBucket(headBucketPresignRequest);
        +     *
        +     *     // The presigned URL can be used with an HTTP client to check bucket existence and access
        +     *     SdkHttpClient httpClient = ApacheHttpClient.builder().build();
        +     *     HttpExecuteRequest request = HttpExecuteRequest.builder()
        +     *                                                   .request(presignedHeadBucketRequest.httpRequest())
        +     *                                                   .build();
        +     *     HttpExecuteResponse response = httpClient.prepareRequest(request).call();
        +     *
        +     *     // Check if bucket exists and is accessible
        +     *     boolean bucketExists = response.httpResponse().isSuccessful();
        +     *     String region = response.httpResponse().firstMatchingHeader("x-amz-bucket-region").orElse("");
        +     * }
        +     * 
        + */ + default PresignedHeadBucketRequest presignHeadBucket(HeadBucketPresignRequest request) { + throw new UnsupportedOperationException(); + } + + /** + * Presign a {@link HeadBucketRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + *

        + * This is a shorter method of invoking {@link #presignHeadBucket(HeadBucketPresignRequest)} without needing + * to call {@code HeadBucketPresignRequest.builder()} or {@code .build()}. + * + * @see #presignHeadBucket(HeadBucketPresignRequest) + */ + default PresignedHeadBucketRequest presignHeadBucket(Consumer request) { + HeadBucketPresignRequest.Builder builder = HeadBucketPresignRequest.builder(); + request.accept(builder); + return presignHeadBucket(builder.build()); + } + /** * Presign a {@link PutObjectRequest} so that it can be executed at a later time without requiring additional * signing or authentication. diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadBucketPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadBucketPresignRequest.java new file mode 100644 index 000000000000..e3686c8cc6a8 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadBucketPresignRequest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link HeadBucketRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + * + * @see S3Presigner#presignHeadBucket(HeadBucketPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class HeadBucketPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final HeadBucketRequest headBucketRequest; + + private HeadBucketPresignRequest(DefaultBuilder builder) { + super(builder); + this.headBucketRequest = Validate.notNull(builder.headBucketRequest, "headBucketRequest"); + } + + /** + * Create a builder that can be used to create a {@link HeadBucketPresignRequest}. + * + * @see S3Presigner#presignHeadBucket(HeadBucketPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link HeadBucketRequest} that should be presigned. 
+ */ + public HeadBucketRequest headBucketRequest() { + return headBucketRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + HeadBucketPresignRequest that = (HeadBucketPresignRequest) o; + + return headBucketRequest.equals(that.headBucketRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + headBucketRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link HeadBucketPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link HeadBucketRequest} that should be presigned. + */ + Builder headBucketRequest(HeadBucketRequest headBucketRequest); + + /** + * Configure the {@link HeadBucketRequest} that should be presigned. + *

        + * This is a convenience method for invoking {@link #headBucketRequest(HeadBucketRequest)} without needing to invoke + * {@code HeadBucketRequest.builder()} or {@code build()}. + */ + default Builder headBucketRequest(Consumer headBucketRequest) { + HeadBucketRequest.Builder builder = HeadBucketRequest.builder(); + headBucketRequest.accept(builder); + return headBucketRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + HeadBucketPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private HeadBucketRequest headBucketRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(HeadBucketPresignRequest request) { + super(request); + this.headBucketRequest = request.headBucketRequest; + } + + @Override + public Builder headBucketRequest(HeadBucketRequest headBucketRequest) { + this.headBucketRequest = headBucketRequest; + return this; + } + + @Override + public HeadBucketPresignRequest build() { + return new HeadBucketPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadObjectPresignRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadObjectPresignRequest.java new file mode 100644 index 000000000000..34a50aeebbcc --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/HeadObjectPresignRequest.java @@ -0,0 +1,151 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Duration; +import java.util.function.Consumer; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.Validate; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A request to pre-sign a {@link HeadObjectRequest} so that it can be executed at a later time without requiring additional + * signing or authentication. + * + * @see S3Presigner#presignHeadObject(HeadObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public final class HeadObjectPresignRequest + extends PresignRequest + implements ToCopyableBuilder { + private final HeadObjectRequest headObjectRequest; + + private HeadObjectPresignRequest(DefaultBuilder builder) { + super(builder); + this.headObjectRequest = Validate.notNull(builder.headObjectRequest, "headObjectRequest"); + } + + /** + * Create a builder that can be used to create a {@link HeadObjectPresignRequest}. + * + * @see S3Presigner#presignHeadObject(HeadObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + /** + * Retrieve the {@link HeadObjectRequest} that should be presigned. 
+ */ + public HeadObjectRequest headObjectRequest() { + return headObjectRequest; + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + HeadObjectPresignRequest that = (HeadObjectPresignRequest) o; + + return headObjectRequest.equals(that.headObjectRequest); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + headObjectRequest.hashCode(); + return result; + } + + /** + * A builder for a {@link HeadObjectPresignRequest}, created with {@link #builder()}. + */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignRequest.Builder, + CopyableBuilder { + /** + * Configure the {@link HeadObjectRequest} that should be presigned. + */ + Builder headObjectRequest(HeadObjectRequest headObjectRequest); + + /** + * Configure the {@link HeadObjectRequest} that should be presigned. + *

        + * This is a convenience method for invoking {@link #headObjectRequest(HeadObjectRequest)} without needing to invoke + * {@code HeadObjectRequest.builder()} or {@code build()}. + */ + default Builder headObjectRequest(Consumer headObjectRequest) { + HeadObjectRequest.Builder builder = HeadObjectRequest.builder(); + headObjectRequest.accept(builder); + return headObjectRequest(builder.build()); + } + + @Override + Builder signatureDuration(Duration signatureDuration); + + @Override + HeadObjectPresignRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder extends PresignRequest.DefaultBuilder implements Builder { + private HeadObjectRequest headObjectRequest; + + private DefaultBuilder() { + } + + private DefaultBuilder(HeadObjectPresignRequest request) { + super(request); + this.headObjectRequest = request.headObjectRequest; + } + + @Override + public Builder headObjectRequest(HeadObjectRequest headObjectRequest) { + this.headObjectRequest = headObjectRequest; + return this; + } + + @Override + public HeadObjectPresignRequest build() { + return new HeadObjectPresignRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadBucketRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadBucketRequest.java new file mode 100644 index 000000000000..22d75a65b2f8 --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadBucketRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link HeadBucketRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignHeadBucket(HeadBucketPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedHeadBucketRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedHeadBucketRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedHeadBucketRequest}. + * + * @see S3Presigner#presignHeadBucket(HeadBucketPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedHeadBucketRequest}, created with {@link #builder()}. 
+ */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedHeadBucketRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedHeadBucketRequest request) { + super(request); + } + + @Override + public PresignedHeadBucketRequest build() { + return new PresignedHeadBucketRequest(this); + } + } +} diff --git a/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadObjectRequest.java b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadObjectRequest.java new file mode 100644 index 000000000000..60ff18284fde --- /dev/null +++ b/services/s3/src/main/java/software/amazon/awssdk/services/s3/presigner/model/PresignedHeadObjectRequest.java @@ -0,0 +1,107 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.presigner.model; + +import java.time.Instant; +import java.util.List; +import java.util.Map; +import software.amazon.awssdk.annotations.Immutable; +import software.amazon.awssdk.annotations.NotThreadSafe; +import software.amazon.awssdk.annotations.SdkInternalApi; +import software.amazon.awssdk.annotations.SdkPublicApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.awscore.presigner.PresignedRequest; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.HeadObjectRequest; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.utils.builder.CopyableBuilder; +import software.amazon.awssdk.utils.builder.ToCopyableBuilder; + +/** + * A pre-signed a {@link HeadObjectRequest} that can be executed at a later time without requiring additional signing or + * authentication. + * + * @see S3Presigner#presignHeadObject(HeadObjectPresignRequest) + * @see #builder() + */ +@SdkPublicApi +@Immutable +@ThreadSafe +public class PresignedHeadObjectRequest + extends PresignedRequest + implements ToCopyableBuilder { + private PresignedHeadObjectRequest(DefaultBuilder builder) { + super(builder); + } + + /** + * Create a builder that can be used to create a {@link PresignedHeadObjectRequest}. + * + * @see S3Presigner#presignHeadObject(HeadObjectPresignRequest) + */ + public static Builder builder() { + return new DefaultBuilder(); + } + + @Override + public Builder toBuilder() { + return new DefaultBuilder(this); + } + + /** + * A builder for a {@link PresignedHeadObjectRequest}, created with {@link #builder()}. 
+ */ + @SdkPublicApi + @NotThreadSafe + public interface Builder extends PresignedRequest.Builder, + CopyableBuilder { + @Override + Builder expiration(Instant expiration); + + @Override + Builder isBrowserExecutable(Boolean isBrowserExecutable); + + @Override + Builder signedHeaders(Map> signedHeaders); + + @Override + Builder signedPayload(SdkBytes signedPayload); + + @Override + Builder httpRequest(SdkHttpRequest httpRequest); + + @Override + PresignedHeadObjectRequest build(); + } + + @SdkInternalApi + private static final class DefaultBuilder + extends PresignedRequest.DefaultBuilder + implements Builder { + private DefaultBuilder() { + } + + private DefaultBuilder(PresignedHeadObjectRequest request) { + super(request); + } + + @Override + public PresignedHeadObjectRequest build() { + return new PresignedHeadObjectRequest(this); + } + } +} diff --git a/services/s3/src/main/resources/codegen-resources/service-2.json b/services/s3/src/main/resources/codegen-resources/service-2.json index 3a28172f165c..93a39759b2a6 100644 --- a/services/s3/src/main/resources/codegen-resources/service-2.json +++ b/services/s3/src/main/resources/codegen-resources/service-2.json @@ -27,7 +27,6 @@ "errors":[ {"shape":"NoSuchUpload"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html", "documentation":"

        This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

        To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.

        • Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to AbortMultipartUpload:

        " }, "CompleteMultipartUpload":{ @@ -38,7 +37,6 @@ }, "input":{"shape":"CompleteMultipartUploadRequest"}, "output":{"shape":"CompleteMultipartUploadOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html", "documentation":"

        Completes a multipart upload by assembling previously uploaded parts.

        You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.

        The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

        Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.

        You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response.

        For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

          If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Special errors
        • Error Code: EntityTooSmall

          • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

          • HTTP Status Code: 400 Bad Request

        • Error Code: InvalidPart

          • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag.

          • HTTP Status Code: 400 Bad Request

        • Error Code: InvalidPartOrder

          • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

          • HTTP Status Code: 400 Bad Request

        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CompleteMultipartUpload:

        " }, "CopyObject":{ @@ -52,9 +50,7 @@ "errors":[ {"shape":"ObjectNotInActiveTierError"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html", - "documentation":"

        Creates a copy of an object that is already stored in Amazon S3.

        You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

        You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

        • Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        • VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

        Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

        Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

        Authentication and authorization

        All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

        Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

        Permissions

        You must have read access to the source object and write access to the destination bucket.

        • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

          • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

        • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

          • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

          For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

        Response and special errors

        When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

        • If the copy is successful, you receive a response with information about the copied object.

        • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

          • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

          • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

            If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

        Charge

        The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

        HTTP Host header syntax
        • Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        • Amazon S3 on Outposts - When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.

        The following operations are related to CopyObject:

        ", - "alias":"PutObjectCopy", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        Creates a copy of an object that is already stored in Amazon S3.

        You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

        You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

        • Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        • VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

        Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

        Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

        Authentication and authorization

        All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

        Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

        Permissions

        You must have read access to the source object and write access to the destination bucket.

        • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

          • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

        • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

          • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

          • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

          For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

        Response and special errors

        When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

        • If the copy is successful, you receive a response with information about the copied object.

        • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

          • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

          • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

            If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

        Charge

        The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

        HTTP Host header syntax
        • Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        • Amazon S3 on Outposts - When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.

        The following operations are related to CopyObject:

        ", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} } @@ -71,9 +67,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html", - "documentation":"

        This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

        Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

        There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

        • General purpose buckets - If you send your CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

          • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

          • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

          • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

            To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.

            For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

          • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

        • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

          The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

          For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to CreateBucket:

        ", - "alias":"PutBucket", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

        Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

        There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

        • General purpose buckets - If you send your CreateBucket request to the s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

          • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

          • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

          • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

            To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.

            For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

          • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

        • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

          The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

          For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to CreateBucket:

        ", "staticContextParams":{ "DisableAccessPoints":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -103,9 +97,7 @@ }, "input":{"shape":"CreateMultipartUploadRequest"}, "output":{"shape":"CreateMultipartUploadOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation":"

        This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

        After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

        If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.

        • Directory buckets - S3 Lifecycle is not supported by directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Request signing

        For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        Encryption
        • General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

          • Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.

            • x-amz-server-side-encryption

            • x-amz-server-side-encryption-aws-kms-key-id

            • x-amz-server-side-encryption-context

            • If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.

            • To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

            • If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.

            • All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

            For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

          • Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.

            • x-amz-server-side-encryption-customer-algorithm

            • x-amz-server-side-encryption-customer-key

            • x-amz-server-side-encryption-customer-key-MD5

            For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CreateMultipartUpload:

        ", - "alias":"InitiateMultipartUpload" + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

        After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

        If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.

        • Directory buckets - S3 Lifecycle is not supported by directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Request signing

        For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service (KMS) key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        Encryption
        • General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

          • Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.

            • x-amz-server-side-encryption

            • x-amz-server-side-encryption-aws-kms-key-id

            • x-amz-server-side-encryption-context

            • If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.

            • To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.

            • If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role.

            • All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.

            For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

          • Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.

            • x-amz-server-side-encryption-customer-algorithm

            • x-amz-server-side-encryption-customer-key

            • x-amz-server-side-encryption-customer-key-MD5

            For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to CreateMultipartUpload:

        " }, "CreateSession":{ "name":"CreateSession", @@ -131,7 +123,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html", "documentation":"

        Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.

        • Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - You must have the s3:DeleteBucket permission on the specified bucket in a policy.

        • Directory bucket permissions - You must have the s3express:DeleteBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to DeleteBucket:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -158,7 +149,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketCorsRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Deletes the cors configuration information set for the bucket.

        To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

        For information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

        Related Resources

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -211,7 +201,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketLifecycleRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html", "documentation":"

        Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.

        Permissions
        • General purpose bucket permissions - By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must have the s3:PutLifecycleConfiguration permission.

          For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

        • Directory bucket permissions - You must have the s3express:PutLifecycleConfiguration permission in an IAM identity-based policy to use this operation. Cross-account access to this API operation isn't supported. The resource owner can optionally grant access permissions to others by creating a role or user for them as long as they are within the same account as the owner and resource.

          For more information about directory bucket policies and permissions, see Authorizing Regional endpoint APIs with IAM in the Amazon S3 User Guide.

          Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        For more information about the object expiration, see Elements to Describe Lifecycle Actions.

        Related actions include:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -264,7 +253,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", "documentation":"

        Deletes the policy of a specified bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:DeleteBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to DeleteBucketPolicy

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -291,7 +279,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Deletes the tags from the bucket.

        To use this operation, you must have permission to perform the s3:PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

        The following operations are related to DeleteBucketTagging:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -305,7 +292,6 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketWebsiteRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

        This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

        For more information about hosting websites, see Hosting Websites on Amazon S3.

        The following operations are related to DeleteBucketWebsite:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -320,7 +306,6 @@ }, "input":{"shape":"DeleteObjectRequest"}, "output":{"shape":"DeleteObjectOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html", "documentation":"

        Removes an object from a bucket. The behavior depends on the bucket's versioning state:

        • If bucket versioning is not enabled, the operation permanently deletes the object.

        • If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.

        • If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.

        • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.

        If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.

        Directory buckets - MFA delete is not supported by directory buckets.

        You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

        Directory buckets - S3 Lifecycle is not supported by directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObject request includes specific headers.

          • s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

          • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following action is related to DeleteObject:

        " }, "DeleteObjectTagging":{ @@ -342,9 +327,7 @@ }, "input":{"shape":"DeleteObjectsRequest"}, "output":{"shape":"DeleteObjectsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", "documentation":"

        This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

        The request can contain a list of up to 1,000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. If the object specified in the request isn't found, Amazon S3 confirms the deletion by returning the result as deleted.

        • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

        When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

        Directory buckets - MFA delete is not supported by directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers.

          • s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission.

          • s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        Content-MD5 request header
        • General purpose bucket - The Content-MD5 request header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

        • Directory bucket - The Content-MD5 request header or an additional checksum request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object Delete requests.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to DeleteObjects:

        ", - "alias":"DeleteMultipleObjects", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -384,8 +367,7 @@ }, "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation":"

        This operation is not supported for directory buckets.

        This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

        The following operations are related to GetBucketAcl:

        ", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

        The following operations are related to GetBucketAcl:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -411,7 +393,6 @@ }, "input":{"shape":"GetBucketCorsRequest"}, "output":{"shape":"GetBucketCorsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.

        To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        For more information about CORS, see Enabling Cross-Origin Resource Sharing.

        The following operations are related to GetBucketCors:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -464,7 +445,6 @@ }, "input":{"shape":"GetBucketLifecycleRequest"}, "output":{"shape":"GetBucketLifecycleOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html", "documentation":"

        For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

        This operation is not supported for directory buckets.

        Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

        To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

        GetBucketLifecycle has the following special error:

        • Error code: NoSuchLifecycleConfiguration

          • Description: The lifecycle configuration does not exist.

          • HTTP Status Code: 404 Not Found

          • SOAP Fault Code Prefix: Client

        The following operations are related to GetBucketLifecycle:

        ", "deprecated":true, "staticContextParams":{ @@ -492,7 +472,6 @@ }, "input":{"shape":"GetBucketLocationRequest"}, "output":{"shape":"GetBucketLocationOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint request parameter in a CreateBucket request. For more information, see CreateBucket.

        When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

        When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

        We recommend that you use HeadBucket to return the Region that a bucket resides in. For backward compatibility, Amazon S3 continues to support GetBucketLocation.

        The following operations are related to GetBucketLocation:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -506,8 +485,7 @@ }, "input":{"shape":"GetBucketLoggingRequest"}, "output":{"shape":"GetBucketLoggingOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns the logging status of a bucket and the permissions users have to view and modify that status.

        The following operations are related to GetBucketLogging:

        ", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Returns the logging status of a bucket and the permissions users have to view and modify that status.

        The following operations are related to GetBucketLogging:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -546,7 +524,6 @@ }, "input":{"shape":"GetBucketNotificationConfigurationRequest"}, "output":{"shape":"NotificationConfigurationDeprecated"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETnotification.html", "documentation":"

        This operation is not supported for directory buckets.

        No longer used, see GetBucketNotificationConfiguration.

        ", "deprecated":true, "staticContextParams":{ @@ -574,7 +551,7 @@ }, "input":{"shape":"GetBucketOwnershipControlsRequest"}, "output":{"shape":"GetBucketOwnershipControlsOutput"}, - "documentation":"

        This operation is not supported for directory buckets.

        Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.

        For information about Amazon S3 Object Ownership, see Using Object Ownership.

        The following operations are related to GetBucketOwnershipControls:

        ", + "documentation":"

        This operation is not supported for directory buckets.

        Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.

        A bucket doesn't have OwnershipControls settings in the following cases:

        • The bucket was created before the BucketOwnerEnforced ownership setting was introduced and you've never explicitly applied this value

        • You've manually deleted the bucket ownership control value using the DeleteBucketOwnershipControls API operation.

        By default, Amazon S3 sets OwnershipControls for all newly created buckets.

        For information about Amazon S3 Object Ownership, see Using Object Ownership.

        The following operations are related to GetBucketOwnershipControls:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -587,7 +564,6 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html", "documentation":"

        Returns the policy of a specified bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:GetBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        Example bucket policies

        General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.

        Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following action is related to GetBucketPolicy:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -627,7 +603,6 @@ }, "input":{"shape":"GetBucketRequestPaymentRequest"}, "output":{"shape":"GetBucketRequestPaymentOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.

        The following operations are related to GetBucketRequestPayment:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -641,7 +616,6 @@ }, "input":{"shape":"GetBucketTaggingRequest"}, "output":{"shape":"GetBucketTaggingOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the tag set associated with the bucket.

        To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

        GetBucketTagging has the following special error:

        • Error code: NoSuchTagSet

          • Description: There is no tag set associated with the bucket.

        The following operations are related to GetBucketTagging:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -655,7 +629,6 @@ }, "input":{"shape":"GetBucketVersioningRequest"}, "output":{"shape":"GetBucketVersioningOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETversioningStatus.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the versioning state of a bucket.

        To retrieve the versioning state of a bucket, you must be the bucket owner.

        This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.

        The following operations are related to GetBucketVersioning:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -669,7 +642,6 @@ }, "input":{"shape":"GetBucketWebsiteRequest"}, "output":{"shape":"GetBucketWebsiteOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

        This GET action requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

        The following operations are related to GetBucketWebsite:

        ", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} @@ -687,7 +659,6 @@ {"shape":"NoSuchKey"}, {"shape":"InvalidObjectState"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", "documentation":"

        Retrieves an object from Amazon S3.

        In the GetObject request, specify the full key name for the object.

        General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide.

        Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named amzn-s3-demo-bucket--usw2-az1--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide.

          If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario.

          If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario.

          If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

          • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Storage classes

        If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide.

        Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error.

        Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        Overriding response header values through the request

        There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request.

        You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object.

        The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires.

        To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request.

        • response-cache-control

        • response-content-disposition

        • response-content-encoding

        • response-content-language

        • response-content-type

        • response-expires

        When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to GetObject:

        ", "httpChecksum":{ "requestValidationModeMember":"ChecksumMode", @@ -711,7 +682,6 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide

        This functionality is not supported for Amazon S3 on Outposts.

        By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

        The following operations are related to GetObjectAcl:

        " }, "GetObjectAttributes":{ @@ -725,7 +695,7 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentation":"

        Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error.

          • If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden (\"access denied\") error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

        If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        Versioning

        Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        Conditional request headers

        Consider the following when using request headers:

        • If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested:

          • If-Match condition evaluates to true.

          • If-Unmodified-Since condition evaluates to false.

          For more information about conditional requests, see RFC 7232.

        • If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified:

          • If-None-Match condition evaluates to false.

          • If-Modified-Since condition evaluates to true.

          For more information about conditional requests, see RFC 7232.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following actions are related to GetObjectAttributes:

        " + "documentation":"

        Retrieves all of the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with both of those individual calls can be returned with a single call to GetObjectAttributes.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object.

          The other permissions that you need to use this operation depend on whether the bucket is versioned and if a version ID is passed in the GetObjectAttributes request.

          • If you pass a version ID in your request, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions.

          • If you do not pass a version ID in your request, you need the s3:GetObject and s3:GetObjectAttributes permissions.

          For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

          If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found (\"no such key\") error.

          • If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden (\"access denied\") error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

        If you encrypted an object when you stored the object in Amazon S3 by using server-side encryption with customer-provided encryption keys (SSE-C), then when you retrieve the metadata from the object, you must use the following headers. These headers provide the server with the encryption key required to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        Versioning

        Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        Conditional request headers

        Consider the following when using request headers:

        • If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested:

          • If-Match condition evaluates to true.

          • If-Unmodified-Since condition evaluates to false.

          For more information about conditional requests, see RFC 7232.

        • If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified:

          • If-None-Match condition evaluates to false.

          • If-Modified-Since condition evaluates to true.

          For more information about conditional requests, see RFC 7232.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following actions are related to GetObjectAttributes:

        " }, "GetObjectLegalHold":{ "name":"GetObjectLegalHold", @@ -775,7 +745,6 @@ }, "input":{"shape":"GetObjectTorrentRequest"}, "output":{"shape":"GetObjectTorrentOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html", "documentation":"

        This operation is not supported for directory buckets.

        Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files.

        You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.

        To use GET, you must have READ access to the object.

        This functionality is not supported for Amazon S3 on Outposts.

        The following action is related to GetObjectTorrent:

        " }, "GetPublicAccessBlock":{ @@ -802,7 +771,6 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketHEAD.html", "documentation":"

        You can use this operation to determine if a bucket exists and if you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.

        If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A message body is not included, so you cannot determine the exception beyond these HTTP response codes.

        Authentication and authorization

        General purpose buckets - Request to public buckets that grant the s3:ListBucket permission publicly do not need to be signed. All other HeadBucket requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use IAM credentials to authenticate and authorize your access to the HeadBucket API operation, instead of using the temporary security credentials through the CreateSession API operation.

        Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

        Permissions

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        " }, "HeadObject":{ @@ -816,7 +784,6 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html", "documentation":"

        The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata.

        A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes.

        Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

        Permissions

        • General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.

          If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

          • If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.

          • If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If you enable x-amz-checksum-mode in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.

        Encryption

        Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

        If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

        • x-amz-server-side-encryption-customer-algorithm

        • x-amz-server-side-encryption-customer-key

        • x-amz-server-side-encryption-customer-key-MD5

        For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.

        Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        Versioning
        • If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

        • If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

        • Directory buckets - Delete marker is not supported for directory buckets.

        • Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        The following actions are related to HeadObject:

        " }, "ListBucketAnalyticsConfigurations":{ @@ -876,9 +843,7 @@ }, "input":{"shape":"ListBucketsRequest"}, "output":{"shape":"ListBucketsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

        For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

        We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

        ", - "alias":"GetService" + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

        For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

        We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

        " }, "ListDirectoryBuckets":{ "name":"ListDirectoryBuckets", @@ -901,8 +866,7 @@ }, "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html", - "documentation":"

        This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

        Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

        The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

        Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

        For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        Sorting of multipart uploads in response
        • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

          • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

          • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

        • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListMultipartUploads:

        " + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

        Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

        The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

        Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

        For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        Sorting of multipart uploads in response
        • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

          • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

          • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

        • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListMultipartUploads:

        " }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -912,9 +876,7 @@ }, "input":{"shape":"ListObjectVersionsRequest"}, "output":{"shape":"ListObjectVersionsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETVersion.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

        To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

        A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

        To use this operation, you must have READ access to the bucket.

        The following operations are related to ListObjectVersions:

        ", - "alias":"GetBucketObjectVersions" + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

        To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

        A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

        To use this operation, you must have READ access to the bucket.

        The following operations are related to ListObjectVersions:

        " }, "ListObjects":{ "name":"ListObjects", @@ -927,9 +889,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html", - "documentation":"

        This operation is not supported for directory buckets.

        Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

        This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

        The following operations are related to ListObjects:

        ", - "alias":"GetBucket" + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

        This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

        The following operations are related to ListObjects:

        " }, "ListObjectsV2":{ "name":"ListObjectsV2", @@ -952,8 +912,7 @@ }, "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html", - "documentation":"

        Lists the parts that have been uploaded for a specific multipart upload.

        To use this operation, you must provide the upload ID in the request. You obtain this uploadID by sending the initiate multipart upload request through CreateMultipartUpload.

        The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

        For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

          If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt action for the ListParts request to succeed.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListParts:

        " + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        Lists the parts that have been uploaded for a specific multipart upload.

        To use this operation, you must provide the upload ID in the request. You obtain this uploadID by sending the initiate multipart upload request through CreateMultipartUpload.

        The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

        For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

          If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt action for the ListParts request to succeed.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to ListParts:

        " }, "PutBucketAccelerateConfiguration":{ "name":"PutBucketAccelerateConfiguration", @@ -978,8 +937,7 @@ "requestUri":"/{Bucket}?acl" }, "input":{"shape":"PutBucketAclRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "documentation":"

        This operation is not supported for directory buckets.

        Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have the WRITE_ACP permission.

        You can use one of the following two ways to set a bucket's permissions:

        • Specify the ACL in the request body

        • Specify permissions using request headers

        You cannot specify access permission using both the body and the request headers.

        Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions by using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their canonical user IDs.

          x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        The following operations are related to PutBucketAcl:

        ", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have the WRITE_ACP permission.

        You can use one of the following two ways to set a bucket's permissions:

        • Specify the ACL in the request body

        • Specify permissions using request headers

        You cannot specify access permission using both the body and the request headers.

        Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions by using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their email addresses.

          x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\", id=\"555566667777\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways. For examples of how to specify these grantee values in JSON format, see the Amazon Web Services CLI example in Enabling Amazon S3 server access logging in the Amazon S3 User Guide.

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        The following operations are related to PutBucketAcl:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1007,7 +965,6 @@ "requestUri":"/{Bucket}?cors" }, "input":{"shape":"PutBucketCorsRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

        To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

        You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

        To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

        When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

        • The request's Origin header must match AllowedOrigin elements.

        • The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

        • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

        For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.

        The following operations are related to PutBucketCors:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1064,7 +1021,6 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", "documentation":"

        This operation is not supported for directory buckets.

        For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

        This operation is not supported for directory buckets.

        Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.

        By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

        You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

        • s3:DeleteObject

        • s3:DeleteObjectVersion

        • s3:PutLifecycleConfiguration

        For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

        For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

        The following operations are related to PutBucketLifecycle:

        ", "deprecated":true, "httpChecksum":{ @@ -1099,8 +1055,7 @@ "requestUri":"/{Bucket}?logging" }, "input":{"shape":"PutBucketLoggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "documentation":"

        This operation is not supported for directory buckets.

        Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

        The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

        If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

        <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

        For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

        For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

        The following operations are related to PutBucketLogging:

        ", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        This operation is not supported for directory buckets.

        Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

        The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

        If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (by using request elements) in the following ways. For examples of how to specify these grantee values in JSON format, see the Amazon Web Services CLI example in Enabling Amazon S3 server access logging in the Amazon S3 User Guide.

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

        <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

        For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.

        For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

        The following operations are related to PutBucketLogging:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1128,7 +1083,6 @@ "requestUri":"/{Bucket}?notification" }, "input":{"shape":"PutBucketNotificationRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", "documentation":"

        This operation is not supported for directory buckets.

        No longer used, see the PutBucketNotificationConfiguration operation.

        ", "deprecated":true, "httpChecksum":{ @@ -1159,7 +1113,10 @@ }, "input":{"shape":"PutBucketOwnershipControlsRequest"}, "documentation":"

        This operation is not supported for directory buckets.

        Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.

        For information about Amazon S3 Object Ownership, see Using object ownership.

        The following operations are related to PutBucketOwnershipControls:

        ", - "httpChecksum":{"requestChecksumRequired":true}, + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + }, "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -1171,7 +1128,6 @@ "requestUri":"/{Bucket}?policy" }, "input":{"shape":"PutBucketPolicyRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", "documentation":"

        Applies an Amazon S3 bucket policy to an Amazon S3 bucket.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region-code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions

        If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must both have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

        If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

        To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.

        • General purpose bucket permissions - The s3:PutBucketPolicy permission is required in a policy. For more information about general purpose buckets bucket policies, see Using Bucket Policies and User Policies in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutBucketPolicy permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

        Example bucket policies

        General purpose buckets example bucket policies - See Bucket policy examples in the Amazon S3 User Guide.

        Directory bucket example bucket policies - See Example bucket policies for S3 Express One Zone in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

        The following operations are related to PutBucketPolicy:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1204,7 +1160,6 @@ "requestUri":"/{Bucket}?requestPayment" }, "input":{"shape":"PutBucketRequestPaymentRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

        The following operations are related to PutBucketRequestPayment:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1221,7 +1176,6 @@ "requestUri":"/{Bucket}?tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the tags for a bucket.

        Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.

        When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.

        To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

        PutBucketTagging has the following special errors. For more Amazon S3 errors see, Error Responses.

        • InvalidTag - The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

        • MalformedXML - The XML provided does not match the schema.

        • OperationAborted - A conflicting conditional action is currently in progress against this resource. Please try again.

        • InternalError - The service was unable to apply the provided tag to the bucket.

        The following operations are related to PutBucketTagging:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1238,7 +1192,6 @@ "requestUri":"/{Bucket}?versioning" }, "input":{"shape":"PutBucketVersioningRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", "documentation":"

        This operation is not supported for directory buckets.

        When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. While this change is propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for requests to objects created or updated after enabling versioning. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations (PUT or DELETE) on objects in the bucket.

        Sets the versioning state of an existing bucket.

        You can set the versioning state with one of the following values:

        Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

        Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.

        If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

        In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

        If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

        The following operations are related to PutBucketVersioning:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1255,7 +1208,6 @@ "requestUri":"/{Bucket}?website" }, "input":{"shape":"PutBucketWebsiteRequest"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", "documentation":"

        This operation is not supported for directory buckets.

        Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

        This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

        To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.

        • WebsiteConfiguration

        • RedirectAllRequestsTo

        • HostName

        • Protocol

        If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

        • WebsiteConfiguration

        • IndexDocument

        • Suffix

        • ErrorDocument

        • Key

        • RoutingRules

        • RoutingRule

        • Condition

        • HttpErrorCodeReturnedEquals

        • KeyPrefixEquals

        • Redirect

        • Protocol

        • HostName

        • ReplaceKeyPrefixWith

        • ReplaceKeyWith

        • HttpRedirectCode

        Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.

        The maximum request length is limited to 128 KB.

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1279,8 +1231,7 @@ {"shape":"TooManyParts"}, {"shape":"EncryptionTypeMismatch"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation":"

        Adds an object to a bucket.

        • Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

        • If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

        • S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.

          This functionality is not supported for directory buckets.

        • If-None-Match - Uploads the object only if the object key name does not already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 failure, retry the upload.

          Expects the * character (asterisk).

          For more information, see Add preconditions to S3 operations with conditional requests in the Amazon S3 User Guide or RFC 7232.

          This functionality is not supported for S3 on Outposts.

        • S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.

          This functionality is not supported for directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers.

          • s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it.

          • s3:PutObjectAcl - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl.

          • s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Data integrity with Content-MD5
        • General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

        • Directory bucket - This functionality is not supported for directory buckets.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        For more information about related Amazon S3 APIs, see the following:

        ", + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will discontinue support for creating new Email Grantee Access Control Lists (ACL). Email Grantee ACLs created prior to this date will continue to work and remain accessible through the Amazon Web Services Management Console, Command Line Interface (CLI), SDKs, and REST API. However, you will no longer be able to create new Email Grantee ACLs.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        Adds an object to a bucket.

        • Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.

        • If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner.

        • Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

        • S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide.

          This functionality is not supported for directory buckets.

        • If-None-Match - Uploads the object only if the object key name does not already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a conflicting operation occurs during the upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 failure, retry the upload.

          Expects the * character (asterisk).

          For more information, see Add preconditions to S3 operations with conditional requests in the Amazon S3 User Guide or RFC 7232.

          This functionality is not supported for S3 on Outposts.

        • S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning.

          This functionality is not supported for directory buckets.

        Permissions
        • General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers.

          • s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it.

          • s3:PutObjectAcl - To successfully change the object's ACL of your PutObject request, you must have the s3:PutObjectAcl permission.

          • s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging permission.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Data integrity with Content-MD5
        • General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value.

        • Directory bucket - This functionality is not supported for directory buckets.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        For more information about related Amazon S3 APIs, see the following:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1297,8 +1248,7 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "documentation":"

        This operation is not supported for directory buckets.

        Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.

        This functionality is not supported for Amazon S3 on Outposts.

        Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-read header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.

          x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        Versioning

        The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

        The following operations are related to PutObjectAcl:

        ", + "documentation":"

        This operation is not supported for directory buckets.

        Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.

        This functionality is not supported for Amazon S3 on Outposts.

        Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.

        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.

        Permissions

        You can set access permissions using one of the following methods:

        • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.

        • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

          You specify each grantee as a type=value pair, where the type is one of the following:

          • id – if the value specified is the canonical user ID of an Amazon Web Services account

          • uri – if you are granting permissions to a predefined group

          • emailAddress – if the value specified is the email address of an Amazon Web Services account

            Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

            • US East (N. Virginia)

            • US West (N. California)

            • US West (Oregon)

            • Asia Pacific (Singapore)

            • Asia Pacific (Sydney)

            • Asia Pacific (Tokyo)

            • Europe (Ireland)

            • South America (São Paulo)

            For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

          For example, the following x-amz-grant-read header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.

          x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

        You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

        Grantee Values

        You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways. For examples of how to specify these grantee values in JSON format, see the Amazon Web Services CLI example in Enabling Amazon S3 server access logging in the Amazon S3 User Guide.

        • By the person's ID:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

          DisplayName is optional and ignored in the request.

        • By URI:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

        • By Email address:

          <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

          The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

          • US East (N. Virginia)

          • US West (N. California)

          • US West (Oregon)

          • Asia Pacific (Singapore)

          • Asia Pacific (Sydney)

          • Asia Pacific (Tokyo)

          • Europe (Ireland)

          • South America (São Paulo)

          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

        Versioning

        The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

        The following operations are related to PutObjectAcl:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1376,6 +1326,19 @@ "UseS3ExpressControlEndpoint":{"value":true} } }, + "RenameObject":{ + "name":"RenameObject", + "http":{ + "method":"PUT", + "requestUri":"/{Bucket}/{Key+}?renameObject" + }, + "input":{"shape":"RenameObjectRequest"}, + "output":{"shape":"RenameObjectOutput"}, + "errors":[ + {"shape":"IdempotencyParameterMismatch"} + ], + "documentation":"

        Renames an existing object in a directory bucket that uses the S3 Express One Zone storage class. You can use RenameObject by specifying an existing object’s name as the source and the new name of the object as the destination within the same directory bucket.

        RenameObject is only supported for objects stored in the S3 Express One Zone storage class.

        To prevent overwriting an object, you can use the If-None-Match conditional header.

        • If-None-Match - Renames the object only if an object with the specified name does not already exist in the directory bucket. If you don't want to overwrite an existing object, you can add the If-None-Match conditional header with the value ‘*’ in the RenameObject request. Amazon S3 then returns a 412 Precondition Failed error if the object with the specified name already exists. For more information, see RFC 7232.

        Permissions

        To grant access to the RenameObject operation on a directory bucket, we recommend that you use the CreateSession operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the directory bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI and SDKs will create and manage your session including refreshing the session token automatically to avoid service interruptions when a session expires. In your bucket policy, you can specify the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. A ReadWrite session is required for executing all the Zonal endpoint API operations, including RenameObject. For more information about authorization, see CreateSession . To learn more about Zonal endpoint API operations, see Authorizing Zonal endpoint API operations with CreateSession in the Amazon S3 User Guide.

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        " + }, "RestoreObject":{ "name":"RestoreObject", "http":{ @@ -1387,9 +1350,7 @@ "errors":[ {"shape":"ObjectAlreadyInActiveTierError"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", "documentation":"

        This operation is not supported for directory buckets.

        Restores an archived copy of an object back into Amazon S3.

        This functionality is not supported for Amazon S3 on Outposts.

        This action performs the following types of requests:

        • restore an archive - Restore an archived object

        For more information about the S3 structure in the request body, see the following:

        Permissions

        To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

        Restoring objects

        Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

        To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

        When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body:

        • Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

        • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

        • Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

        For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

        You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

        To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

        After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request — there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

        If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

        Responses

        A successful action returns either the 200 OK or 202 Accepted status code.

        • If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.

        • If the object is previously restored, Amazon S3 returns 200 OK in the response.

        • Special errors:

          • Code: RestoreAlreadyInProgress

          • Cause: Object restore is already in progress.

          • HTTP Status Code: 409 Conflict

          • SOAP Fault Code Prefix: Client

          • Code: GlacierExpeditedRetrievalNotAvailable

          • Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)

          • HTTP Status Code: 503

          • SOAP Fault Code Prefix: N/A

        The following operations are related to RestoreObject:

        ", - "alias":"PostObjectRestore", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1417,7 +1378,6 @@ }, "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", "documentation":"

        Uploads a part in a multipart upload.

        In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

        You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.

        Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.

        For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

        After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

        For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide .

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Permissions
        • General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs.

          These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

        Data integrity

        General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

        Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

        Encryption
        • General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).

          Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

          If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.

          • x-amz-server-side-encryption-customer-algorithm

          • x-amz-server-side-encryption-customer-key

          • x-amz-server-side-encryption-customer-key-MD5

          For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms).

        Special errors
        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

          • SOAP Fault Code Prefix: Client

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to UploadPart:

        ", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", @@ -1432,7 +1392,6 @@ }, "input":{"shape":"UploadPartCopyRequest"}, "output":{"shape":"UploadPartCopyOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", "documentation":"

        Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

        For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

        Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

        You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

        For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

        Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

        Authentication and authorization

        All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

        Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

        Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.

        Permissions

        You must have READ access to the source object and WRITE access to the destination bucket.

        • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.

          • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.

          • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.

          • To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.

        • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.

          • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

          • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.

          If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

          For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

        Encryption
        • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

          For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

          S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

        Special errors
        • Error Code: NoSuchUpload

          • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

          • HTTP Status Code: 404 Not Found

        • Error Code: InvalidRequest

          • Description: The specified copy source is not supported as a byte-range copy source.

          • HTTP Status Code: 400 Bad Request

        HTTP Host header syntax

        Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

        The following operations are related to UploadPartCopy:

        ", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} @@ -1730,16 +1689,14 @@ }, "BucketAlreadyExists":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The requested bucket name is not available. The bucket namespace is shared by all users of the system. Select a different name and try again.

        ", "error":{"httpStatusCode":409}, "exception":true }, "BucketAlreadyOwnedByYou":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all Amazon Web Services Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

        ", "error":{"httpStatusCode":409}, "exception":true @@ -2050,6 +2007,7 @@ "FULL_OBJECT" ] }, + "ClientToken":{"type":"string"}, "CloudFunction":{"type":"string"}, "CloudFunctionConfiguration":{ "type":"structure", @@ -2144,7 +2102,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms).

        ", + "documentation":"

        The server-side encryption algorithm used when storing this object in Amazon S3.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2378,8 +2336,7 @@ "ContentType":{"type":"string"}, "ContinuationEvent":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        ", "event":true }, @@ -2410,7 +2367,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2597,7 +2554,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

        Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy.

        With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide.

        General purpose buckets

        • For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

        • When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

        Directory buckets

        • For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        • To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

        ", + "documentation":"

        The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

        Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy.

        With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide.

        General purpose buckets

        • For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.

        • When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence.

        Directory buckets

        • For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

        • To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration.

        • S3 access points for Amazon FSx - When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx. All Amazon FSx file systems have encryption configured by default and are encrypted at rest. Data is automatically encrypted before being written to the file system, and automatically decrypted as it is read. These processes are handled transparently by Amazon FSx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -2807,7 +2764,7 @@ }, "Location":{ "shape":"LocationInfo", - "documentation":"

        Specifies the location where the bucket will be created.

        Directory buckets - The location type is Availability Zone or Local Zone. To use the Local Zone location type, your account must be enabled for Dedicated Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied. To learn more, see Enable accounts for Dedicated Local Zones in the Amazon S3 User Guide.

        This functionality is only supported by directory buckets.

        " + "documentation":"

        Specifies the location where the bucket will be created.

        Directory buckets - The location type is Availability Zone or Local Zone. To use the Local Zone location type, your account must be enabled for Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied. To learn more, see Enable accounts for Local Zones in the Amazon S3 User Guide.

        This functionality is only supported by directory buckets.

        " }, "Bucket":{ "shape":"BucketInfo", @@ -2965,7 +2922,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -3113,7 +3070,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

        • S3 access points for Amazon FSx - When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx. All Amazon FSx file systems have encryption configured by default and are encrypted at rest. Data is automatically encrypted before being written to the file system, and automatically decrypted as it is read. These processes are handled transparently by Amazon FSx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -3220,7 +3177,7 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store objects in the directory bucket.

        ", + "documentation":"

        The server-side encryption algorithm used when you store objects in the directory bucket.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -3268,7 +3225,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm to use when you store objects in the directory bucket.

        For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        ", + "documentation":"

        The server-side encryption algorithm to use when you store objects in the directory bucket.

        For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

        S3 access points for Amazon FSx - When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server side encryption option is aws:fsx. All Amazon FSx file systems have encryption configured by default and are encrypted at rest. Data is automatically encrypted before being written to the file system, and automatically decrypted as it is read. These processes are handled transparently by Amazon FSx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -3431,6 +3388,12 @@ "documentation":"

        The ID used to identify the S3 Intelligent-Tiering configuration.

        ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

        The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

        ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -4031,8 +3994,7 @@ }, "EncryptionTypeMismatch":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The existing object was created with a different encryption type. Subsequent write requests must include the appropriate encryption parameters in the request or while creating the session.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -4043,8 +4005,7 @@ }, "EndEvent":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A message that indicates the request is complete and no more messages will be sent. You should not assume that the request is complete until the client receives an EndEvent.

        ", "event":true }, @@ -4137,8 +4098,7 @@ }, "EventBridgeConfiguration":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A container for specifying the configuration for Amazon EventBridge.

        " }, "EventList":{ @@ -4422,6 +4382,12 @@ "documentation":"

        The ID used to identify the S3 Intelligent-Tiering configuration.

        ", "location":"querystring", "locationName":"id" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

        The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

        ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -5064,7 +5030,7 @@ }, "Parts":{ "shape":"PartsList", - "documentation":"

        A container for elements related to a particular part. A response can contain zero or more Parts elements.

        • General purpose buckets - For GetObjectAttributes, if a additional checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) isn't applied to the object specified in the request, the response doesn't return Part.

        • Directory buckets - For GetObjectAttributes, no matter whether a additional checksum is applied to the object specified in the request, the response returns Part.

        ", + "documentation":"

        A container for elements related to a particular part. A response can contain zero or more Parts elements.

        • General purpose buckets - For GetObjectAttributes, if an additional checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1, or x-amz-checksum-sha256) isn't applied to the object specified in the request, the response doesn't return the Part element.

        • Directory buckets - For GetObjectAttributes, regardless of whether an additional checksum is applied to the object specified in the request, the response returns the Part element.

        ", "locationName":"Part" } }, @@ -5099,13 +5065,13 @@ }, "MaxParts":{ "shape":"MaxParts", - "documentation":"

        Sets the maximum number of parts to return.

        ", + "documentation":"

        Sets the maximum number of parts to return. For more information, see Uploading and copying objects using multipart upload in Amazon S3 in the Amazon Simple Storage Service User Guide.

        ", "location":"header", "locationName":"x-amz-max-parts" }, "PartNumberMarker":{ "shape":"PartNumberMarker", - "documentation":"

        Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.

        ", + "documentation":"

        Specifies the part after which listing should begin. Only parts with higher part numbers will be listed. For more information, see Uploading and copying objects using multipart upload in Amazon S3 in the Amazon Simple Storage Service User Guide.

        ", "location":"header", "locationName":"x-amz-part-number-marker" }, @@ -5373,7 +5339,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3.

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -6047,7 +6013,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -6104,6 +6070,12 @@ "location":"header", "locationName":"x-amz-mp-parts-count" }, + "TagCount":{ + "shape":"TagCount", + "documentation":"

        The number of tags, if any, on the object, when you have the relevant permission to read object tags.

        You can use GetObjectTagging to retrieve the tag set associated with an object.

        This functionality is not supported for directory buckets.

        ", + "location":"header", + "locationName":"x-amz-tagging-count" + }, "ObjectLockMode":{ "shape":"ObjectLockMode", "documentation":"

        The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the s3:GetObjectRetention permission. For more information about S3 Object Lock, see Object Lock.

        This functionality is not supported for directory buckets.

        ", @@ -6264,6 +6236,13 @@ "HttpErrorCodeReturnedEquals":{"type":"string"}, "HttpRedirectCode":{"type":"string"}, "ID":{"type":"string"}, + "IdempotencyParameterMismatch":{ + "type":"structure", + "members":{}, + "documentation":"

        Parameters on this idempotent request are inconsistent with parameters used in previous request(s).

        For a list of error codes and more information on Amazon S3 errors, see Error codes.

        Idempotency ensures that an API request completes no more than one time. With an idempotent request, if the original request completes successfully, any subsequent retries complete successfully without performing any further actions.

        ", + "error":{"httpStatusCode":400}, + "exception":true + }, "IfMatch":{"type":"string"}, "IfMatchInitiatedTime":{ "type":"timestamp", @@ -6423,16 +6402,14 @@ }, "InvalidRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You may receive this error in multiple cases. Depending on the reason for the error, you may receive one of the messages below:

        • Cannot specify both a write offset value and user-defined object metadata for existing objects.

        • Checksum Type mismatch occurred, expected checksum Type: sha1, actual checksum Type: crc32c.

        • Request body cannot be empty when 'write offset' is specified.

        ", "error":{"httpStatusCode":400}, "exception":true }, "InvalidWriteOffset":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The write offset value that you specified does not match the current object size.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -6745,7 +6722,7 @@ }, "Filter":{ "shape":"LifecycleRuleFilter", - "documentation":"

        The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified. Filter is required if the LifecycleRule does not contain a Prefix element.

        Tag filters are not supported for directory buckets.

        " + "documentation":"

        The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, or And specified. Filter is required if the LifecycleRule does not contain a Prefix element.

        For more information about Tag filters, see Adding filters to Lifecycle rules in the Amazon S3 User Guide.

        Tag filters are not supported for directory buckets.

        " }, "Status":{ "shape":"ExpirationStatus", @@ -6903,6 +6880,12 @@ "documentation":"

        The ContinuationToken that represents a placeholder from where this request should begin.

        ", "location":"querystring", "locationName":"continuation-token" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

        The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

        ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" } } }, @@ -7481,7 +7464,7 @@ }, "ContinuationToken":{ "shape":"Token", - "documentation":"

        If ContinuationToken was sent with the request, it is included in the response. You can use the returned ContinuationToken for pagination of the list response. You can use this ContinuationToken for pagination of the list results.

        " + "documentation":"

        If ContinuationToken was sent with the request, it is included in the response. You can use the returned ContinuationToken for pagination of the list response.

        " }, "NextContinuationToken":{ "shape":"NextToken", @@ -8001,24 +7984,21 @@ "NextVersionIdMarker":{"type":"string"}, "NoSuchBucket":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified bucket does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true }, "NoSuchKey":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified key does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true }, "NoSuchUpload":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified multipart upload does not exist.

        ", "error":{"httpStatusCode":404}, "exception":true @@ -8160,8 +8140,7 @@ }, "ObjectAlreadyInActiveTierError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        This action is not allowed against this storage tier.

        ", "error":{"httpStatusCode":403}, "exception":true @@ -8317,8 +8296,7 @@ "ObjectLockToken":{"type":"string"}, "ObjectNotInActiveTierError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The source object of the COPY action is not in the active tier and is only stored in Amazon S3 Glacier.

        ", "error":{"httpStatusCode":403}, "exception":true @@ -8391,7 +8369,8 @@ "OUTPOSTS", "GLACIER_IR", "SNOW", - "EXPRESS_ONEZONE" + "EXPRESS_ONEZONE", + "FSX_OPENZFS" ] }, "ObjectVersion":{ @@ -8498,7 +8477,7 @@ "documentation":"

        Container for the ID of the owner.

        " } }, - "documentation":"

        Container for the owner's display name and ID.

        " + "documentation":"

        End of support notice: Beginning October 1, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

        This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

        Container for the owner's display name and ID.

        " }, "OwnerOverride":{ "type":"string", @@ -8531,8 +8510,7 @@ }, "ParquetInput":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Container for Parquet.

        " }, "Part":{ @@ -8955,6 +8933,12 @@ "location":"querystring", "locationName":"id" }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "documentation":"

        The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

        ", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + }, "IntelligentTieringConfiguration":{ "shape":"IntelligentTieringConfiguration", "documentation":"

        Container for S3 Intelligent-Tiering configuration.

        ", @@ -9270,6 +9254,12 @@ "documentation":"

        The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket.

        ", "locationName":"OwnershipControls", "xmlNamespace":{"uri":"http://s3.amazonaws.com/doc/2006-03-01/"} + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "documentation":"

        Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide.

        If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.

        ", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" } }, "payload":"OwnershipControls" @@ -9820,7 +9810,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3.

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10039,7 +10029,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).

        • General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

        ", + "documentation":"

        The server-side encryption algorithm that was used when you store this object in Amazon S3 or Amazon FSx.

        • General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

        • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

          In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket.

          When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket.

        • S3 access points for Amazon FSx - When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx. All Amazon FSx file systems have encryption configured by default and are encrypted at rest. Data is automatically encrypted before being written to the file system, and automatically decrypted as it is read. These processes are handled transparently by Amazon FSx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -10435,6 +10425,109 @@ "max":20, "min":0 }, + "RenameObjectOutput":{ + "type":"structure", + "members":{} + }, + "RenameObjectRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "RenameSource" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "documentation":"

        The bucket name of the directory bucket containing the object.

        You must use virtual-hosted-style requests in the format Bucket-name.s3express-zone-id.region-code.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket-base-name--zone-id--x-s3 (for example, amzn-s3-demo-bucket--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide.

        ", + "contextParam":{"name":"Bucket"}, + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "documentation":"

        Key name of the object to rename.

        ", + "contextParam":{"name":"Key"}, + "location":"uri", + "locationName":"Key" + }, + "RenameSource":{ + "shape":"RenameSource", + "documentation":"

        Specifies the source for the rename operation. The value must be URL encoded.

        ", + "location":"header", + "locationName":"x-amz-rename-source" + }, + "DestinationIfMatch":{ + "shape":"IfMatch", + "documentation":"

        Renames the object only if the ETag (entity tag) value provided during the operation matches the ETag of the object in S3. The If-Match header field makes the request method conditional on ETags. If the ETag values do not match, the operation returns a 412 Precondition Failed error.

        Expects the ETag value as a string.

        ", + "location":"header", + "locationName":"If-Match" + }, + "DestinationIfNoneMatch":{ + "shape":"IfNoneMatch", + "documentation":"

        Renames the object only if the destination does not already exist in the specified directory bucket. If the object does exist when you send a request with If-None-Match:*, the S3 API will return a 412 Precondition Failed error, preventing an overwrite. The If-None-Match header prevents overwrites of existing data by validating that there's not an object with the same key name already in your directory bucket.

        Expects the * character (asterisk).

        ", + "location":"header", + "locationName":"If-None-Match" + }, + "DestinationIfModifiedSince":{ + "shape":"IfModifiedSince", + "documentation":"

        Renames the object if the destination exists and if it has been modified since the specified time.

        ", + "location":"header", + "locationName":"If-Modified-Since" + }, + "DestinationIfUnmodifiedSince":{ + "shape":"IfUnmodifiedSince", + "documentation":"

        Renames the object if it hasn't been modified since the specified time.

        ", + "location":"header", + "locationName":"If-Unmodified-Since" + }, + "SourceIfMatch":{ + "shape":"RenameSourceIfMatch", + "documentation":"

        Renames the object if the source exists and if its entity tag (ETag) matches the specified ETag.

        ", + "location":"header", + "locationName":"x-amz-rename-source-if-match" + }, + "SourceIfNoneMatch":{ + "shape":"RenameSourceIfNoneMatch", + "documentation":"

        Renames the object if the source exists and if its entity tag (ETag) is different than the specified ETag. If an asterisk (*) character is provided, the operation will fail and return a 412 Precondition Failed error.

        ", + "location":"header", + "locationName":"x-amz-rename-source-if-none-match" + }, + "SourceIfModifiedSince":{ + "shape":"RenameSourceIfModifiedSince", + "documentation":"

        Renames the object if the source exists and if it has been modified since the specified time.

        ", + "location":"header", + "locationName":"x-amz-rename-source-if-modified-since" + }, + "SourceIfUnmodifiedSince":{ + "shape":"RenameSourceIfUnmodifiedSince", + "documentation":"

        Renames the object if the source exists and hasn't been modified since the specified time.

        ", + "location":"header", + "locationName":"x-amz-rename-source-if-unmodified-since" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        A unique string with a max of 64 ASCII characters in the ASCII range of 33 - 126.

        RenameObject supports idempotency using a client token. To make an idempotent API request using RenameObject, specify a client token in the request. You should not reuse the same client token for other API requests. If you retry a request that completed successfully using the same client token and the same parameters, the retry succeeds without performing any further actions. If you retry a successful request using the same client token, but one or more of the parameters are different, the retry fails and an IdempotencyParameterMismatch error is returned.

        ", + "idempotencyToken":true, + "location":"header", + "locationName":"x-amz-client-token" + } + } + }, + "RenameSource":{ + "type":"string", + "pattern":"\\/?.+\\/.+" + }, + "RenameSourceIfMatch":{"type":"string"}, + "RenameSourceIfModifiedSince":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, + "RenameSourceIfNoneMatch":{"type":"string"}, + "RenameSourceIfUnmodifiedSince":{ + "type":"timestamp", + "timestampFormat":"rfc822" + }, "ReplaceKeyPrefixWith":{"type":"string"}, "ReplaceKeyWith":{"type":"string"}, "ReplicaKmsKeyID":{"type":"string"}, @@ -10609,7 +10702,7 @@ }, "RequestCharged":{ "type":"string", - "documentation":"

        If present, indicates that the requester was successfully charged for the request.

        This functionality is not supported for directory buckets.

        ", + "documentation":"

        If present, indicates that the requester was successfully charged for the request. For more information, see Using Requester Pays buckets for storage transfers and usage in the Amazon Simple Storage Service User Guide.

        This functionality is not supported for directory buckets.

        ", "enum":["requester"] }, "RequestPayer":{ @@ -10959,8 +11052,7 @@ }, "SSES3":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the use of SSE-S3 to encrypt delivered inventory reports.

        ", "locationName":"SSE-S3" }, @@ -11122,6 +11214,7 @@ "type":"string", "enum":[ "AES256", + "aws:fsx", "aws:kms", "aws:kms:dsse" ] @@ -11222,8 +11315,7 @@ }, "SimplePrefix":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        To use simple format for S3 keys for log objects, set SimplePrefix to an empty object.

        [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]

        ", "locationName":"SimplePrefix" }, @@ -11315,7 +11407,8 @@ "OUTPOSTS", "GLACIER_IR", "SNOW", - "EXPRESS_ONEZONE" + "EXPRESS_ONEZONE", + "FSX_OPENZFS" ] }, "StorageClassAnalysis":{ @@ -11472,8 +11565,7 @@ "Token":{"type":"string"}, "TooManyParts":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You have attempted to add more parts than the maximum of 10000 that are allowed for this object. You can use the CopyObject operation to copy this object to another and then add more data to the newly copied object.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -11593,7 +11685,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -11760,7 +11852,7 @@ "members":{ "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

        ", + "documentation":"

        The server-side encryption algorithm used when you store this object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-server-side-encryption" }, @@ -12208,7 +12300,7 @@ }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", - "documentation":"

        The server-side encryption algorithm used when storing requested object in Amazon S3 (for example, AES256, aws:kms).

        ", + "documentation":"

        The server-side encryption algorithm used when storing the requested object in Amazon S3 or Amazon FSx.

        When accessing data stored in Amazon FSx file systems using S3 access points, the only valid server-side encryption option is aws:fsx.

        ", "location":"header", "locationName":"x-amz-fwd-header-x-amz-server-side-encryption" }, diff --git a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java index 07b5bf8a9f12..ddd09c1d134f 100644 --- a/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java +++ b/services/s3/src/test/java/software/amazon/awssdk/services/s3/S3PresignerTest.java @@ -56,6 +56,8 @@ import software.amazon.awssdk.services.s3.presigner.S3Presigner; import software.amazon.awssdk.services.s3.presigner.model.PresignedDeleteObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadBucketRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedHeadObjectRequest; import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest; @RunWith(MockitoJUnitRunner.class) @@ -442,6 +444,55 @@ public void deleteObject_Sigv4PresignerHonorsSignatureDuration() { }); } + @Test + public void headObject_compareWithGetObject_sameUrlDifferentMethod() { + PresignedHeadObjectRequest headRequest = + presigner.presignHeadObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .headObjectRequest(ho -> ho.bucket("test-bucket") + .key("test-key"))); + + PresignedGetObjectRequest getRequest = + presigner.presignGetObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .getObjectRequest(go -> go.bucket("test-bucket") + .key("test-key"))); + + String headUrl = headRequest.url().toString(); + String getUrl = getRequest.url().toString(); + + assertThat(headUrl).contains("X-Amz-Algorithm="); + assertThat(getUrl).contains("X-Amz-Algorithm="); + assertThat(headRequest.httpRequest().method().name()).isEqualTo("HEAD"); + assertThat(getRequest.httpRequest().method().name()).isEqualTo("GET"); + } + + @Test + public void 
headObject_withVersionId_includesVersionIdInQueryString() { + String versionId = "version-12345"; + + PresignedHeadObjectRequest presigned = + presigner.presignHeadObject(r -> r.signatureDuration(Duration.ofMinutes(5)) + .headObjectRequest(ho -> ho.bucket("versioned-bucket") + .key("versioned-object") + .versionId(versionId))); + + assertThat(presigned.url().toString()).contains("versionId=" + versionId); + assertThat(presigned.httpRequest().rawQueryParameters().get("versionId").get(0)).isEqualTo(versionId); + } + + @Test + public void headBucket_withExpectedBucketOwner_includesHeaderInSignature() { + String accountId = "123456789012"; + + PresignedHeadBucketRequest presigned = + presigner.presignHeadBucket(r -> r.signatureDuration(Duration.ofMinutes(5)) + .headBucketRequest(hb -> hb.bucket("owner-bucket") + .expectedBucketOwner(accountId))); + + assertThat(presigned.isBrowserExecutable()).isFalse(); + assertThat(presigned.signedHeaders().keySet()).containsExactlyInAnyOrder("host", "x-amz-expected-bucket-owner"); + assertThat(presigned.signedHeaders().get("x-amz-expected-bucket-owner")).containsExactly(accountId); + } + @Test public void getObject_S3ConfigurationCanBeOverriddenToLeverageTransferAcceleration() { S3Presigner presigner = presignerBuilder().serviceConfiguration(S3Configuration.builder() diff --git a/services/s3control/pom.xml b/services/s3control/pom.xml index a0d810f29da4..1cae5efb9fe5 100644 --- a/services/s3control/pom.xml +++ b/services/s3control/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT s3control AWS Java SDK :: Services :: Amazon S3 Control diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json index ec5924e2bc86..95118d72f628 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json +++ 
b/services/s3control/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -468,6 +468,29 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, { "conditions": [ { @@ -483,6 +506,42 @@ "error": "S3Express does not support Dual-stack.", "type": "error" }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -958,6 +1017,80 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "S3Express does not support Dual-stack.", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "endpoint": { + "url": "{url#scheme}://{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { 
"conditions": [ { diff --git a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json index 9fec0db2ac5a..018acbac3b15 100644 --- a/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json +++ b/services/s3control/src/main/resources/codegen-resources/endpoint-tests.json @@ -4528,6 +4528,420 @@ "UseDualStack": false, "UseFIPS": false } + }, + { + "documentation": "Access Point APIs on express bucket routed to custom endpoint if provided", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://my-endpoint.express-control.s3.aws.dev" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", 
+ "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "AccessPointName": "myaccesspoint--abcd-ab1--xa-s3", + "Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "Region": "us-east-1", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Access Point APIs on express bucket routed to custom endpoint if provided for List", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3express", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://my-endpoint.express-control.s3.aws.dev" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev" + }, + "operationName": "ListAccessPointsForDirectoryBuckets", + "operationParams": { + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "Region": "us-east-1", + "UseS3ExpressControlEndpoint": true, + "Endpoint": 
"https://my-endpoint.express-control.s3.aws.dev", + "RequiresAccountId": true, + "UseDualStack": false, + "UseFIPS": false + } + }, + { + "documentation": "Error on Access Point APIs on express bucket for dual stack", + "expect": { + "error": "S3Express does not support Dual-stack." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPointPolicyStatus", + 
"operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "AccessPointName": "myaccesspoint--abcd-ab1--xa-s3", + "Region": "us-east-1", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "Error Access Point APIs on express bucket for dual stack for List", + "expect": { + "error": "S3Express does not support Dual-stack." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::UseDualStack": true + }, + "operationName": "ListAccessPointsForDirectoryBuckets", + "operationParams": { + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "Region": "us-east-1", + "UseS3ExpressControlEndpoint": true, + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "Error on Access Point APIs on express bucket for custom endpoint and dual stack", + "expect": { + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "CreateAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "Bucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157", + "Scope": { + "Prefixes": [], + "Permissions": [] + } + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": 
"https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "PutAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157", + "Policy": "my-policy" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPointPolicy", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + }, + { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPointPolicyStatus", + "operationParams": { + "Name": "myaccesspoint--abcd-ab1--xa-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "AccessPointName": "myaccesspoint--abcd-ab1--xa-s3", + "Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "Region": "us-east-1", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false + } + }, + { + "documentation": "Error Access Point APIs on express bucket for custom endpoint and dual stack for List", + "expect": { + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" + }, + "operationInputs": [ 
+ { + "builtInParams": { + "AWS::Region": "us-east-1", + "SDK::Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "AWS::UseDualStack": true + }, + "operationName": "ListAccessPointsForDirectoryBuckets", + "operationParams": { + "DirectoryBucket": "mybucket--abcd-ab1--x-s3", + "AccountId": "871317572157" + } + } + ], + "params": { + "AccountId": "871317572157", + "Region": "us-east-1", + "UseS3ExpressControlEndpoint": true, + "Endpoint": "https://my-endpoint.express-control.s3.aws.dev", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": false + } } ], "version": "1.0" diff --git a/services/s3control/src/main/resources/codegen-resources/service-2.json b/services/s3control/src/main/resources/codegen-resources/service-2.json index 6c43bdefaa5f..18478313297b 100644 --- a/services/s3control/src/main/resources/codegen-resources/service-2.json +++ b/services/s3control/src/main/resources/codegen-resources/service-2.json @@ -108,7 +108,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointResult"}, - "documentation":"

        Creates an access point and associates it to a specified bucket. For more information, see Managing access to shared datasets in general purpose buckets with access points or Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        S3 on Outposts only supports VPC-style access points.

        For more information, see Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only access points in the Amazon S3 User Guide.

        All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

        The following actions are related to CreateAccessPoint:

        ", + "documentation":"

        Creates an access point and associates it to a specified bucket. For more information, see Managing access to shared datasets with access points or Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        To create an access point and attach it to a volume on an Amazon FSx file system, see CreateAndAttachS3AccessPoint in the Amazon FSx API Reference.

        S3 on Outposts only supports VPC-style access points.

        For more information, see Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only access points in the Amazon S3 User Guide.

        All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

        The following actions are related to CreateAccessPoint:

        ", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1129,7 +1129,7 @@ }, "input":{"shape":"ListAccessPointsRequest"}, "output":{"shape":"ListAccessPointsResult"}, - "documentation":"

        This operation is not supported by directory buckets.

        Returns a list of the access points that are owned by the current account that's associated with the specified bucket. You can retrieve up to 1000 access points per call. If the specified bucket has more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

        All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

        The following actions are related to ListAccessPoints:

        ", + "documentation":"

        This operation is not supported by directory buckets.

        Returns a list of the access points. You can retrieve up to 1,000 access points per call. If the call returns more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

        Returns only access points attached to S3 buckets by default. To return all access points, specify DataSourceType as ALL.

        All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

        The following actions are related to ListAccessPoints:

        ", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1826,7 +1826,7 @@ "documentation":"

        The virtual private cloud (VPC) configuration for this access point, if one exists.

        This element is empty if this access point is an Amazon S3 on Outposts access point that is used by other Amazon Web Services services.

        " }, "Bucket":{ - "shape":"BucketName", + "shape":"AccessPointBucketName", "documentation":"

        The name of the bucket associated with this access point.

        " }, "AccessPointArn":{ @@ -1840,10 +1840,22 @@ "BucketAccountId":{ "shape":"AccountId", "documentation":"

        The Amazon Web Services account ID associated with the S3 bucket associated with this access point.

        " + }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

        A unique identifier for the data source of the access point.

        " + }, + "DataSourceType":{ + "shape":"DataSourceType", + "documentation":"

        The type of the data source that the access point is attached to.

        " } }, "documentation":"

        An access point used to access a bucket.

        " }, + "AccessPointBucketName":{ + "type":"string", + "max":255 + }, "AccessPointList":{ "type":"list", "member":{ @@ -2444,7 +2456,7 @@ }, "Name":{ "shape":"AccessPointName", - "documentation":"

        The name you want to assign to this access point.

        For directory buckets, the access point name must consist of a base name that you provide and suffix that includes the ZoneID (Amazon Web Services Availability Zone or Local Zone) of your bucket location, followed by --xa-s3. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        ", + "documentation":"

        The name you want to assign to this access point.

        For directory buckets, the access point name must consist of a base name that you provide and suffix that includes the ZoneID (Amazon Web Services Availability Zone or Local Zone) of your bucket location, followed by --xa-s3. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        ", "contextParam":{"name":"AccessPointName"}, "location":"uri", "locationName":"name" @@ -2468,7 +2480,7 @@ }, "Scope":{ "shape":"Scope", - "documentation":"

        For directory buckets, you can filter access control to specific prefixes, API operations, or a combination of both. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        Scope is not supported for access points for general purpose buckets.

        " + "documentation":"

        For directory buckets, you can filter access control to specific prefixes, API operations, or a combination of both. For more information, see Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        Scope is only supported for access points attached to directory buckets.

        " } } }, @@ -2757,6 +2769,11 @@ "documentation":"

        The Amazon Web Services Security Token Service temporary credential that S3 Access Grants vends to grantees and client applications.

        ", "sensitive":true }, + "DataSourceId":{ + "type":"string", + "max":191 + }, + "DataSourceType":{"type":"string"}, "Date":{"type":"timestamp"}, "Days":{"type":"integer"}, "DaysAfterInitiation":{"type":"integer"}, @@ -3944,7 +3961,7 @@ "documentation":"

        The name of the specified access point.

        " }, "Bucket":{ - "shape":"BucketName", + "shape":"AccessPointBucketName", "documentation":"

        The name of the bucket associated with the specified access point.

        " }, "NetworkOrigin":{ @@ -3975,6 +3992,14 @@ "BucketAccountId":{ "shape":"AccountId", "documentation":"

        The Amazon Web Services account ID associated with the S3 bucket associated with this access point.

        " + }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

        The unique identifier for the data source of the access point.

        " + }, + "DataSourceType":{ + "shape":"DataSourceType", + "documentation":"

        The type of the data source that the access point is attached to.

        " } } }, @@ -5689,6 +5714,18 @@ "documentation":"

        The maximum number of access points that you want to include in the list. If the specified bucket has more than this number of access points, then the response will include a continuation token in the NextToken field that you can use to retrieve the next page of access points.

        ", "location":"querystring", "locationName":"maxResults" + }, + "DataSourceId":{ + "shape":"DataSourceId", + "documentation":"

        The unique identifier for the data source of the access point.

        ", + "location":"querystring", + "locationName":"dataSourceId" + }, + "DataSourceType":{ + "shape":"DataSourceType", + "documentation":"

        The type of the data source that the access point is attached to. Returns only access points attached to S3 buckets by default. To return all access points, specify DataSourceType as ALL.

        ", + "location":"querystring", + "locationName":"dataSourceType" } } }, @@ -6803,7 +6840,7 @@ }, "Policy":{ "shape":"Policy", - "documentation":"

        The policy that you want to apply to the specified access point. For more information about access point policies, see Managing access to shared datasets in general purpose buckets with access points or Managing access to shared datasets in directory bucekts with access points in the Amazon S3 User Guide.

        " + "documentation":"

        The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 access points or Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

        " } } }, @@ -8144,7 +8181,7 @@ "locationName":"Permissions" } }, - "documentation":"

        You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.

        For more information, see Manage the scope of your access points for directory buckets.

        " + "documentation":"

        You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.

        For more information, see Manage the scope of your access points for directory buckets.

        " }, "ScopePermission":{ "type":"string", diff --git a/services/s3outposts/pom.xml b/services/s3outposts/pom.xml index 1ed4747bf7f2..4105920f2259 100644 --- a/services/s3outposts/pom.xml +++ b/services/s3outposts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT s3outposts AWS Java SDK :: Services :: S3 Outposts diff --git a/services/s3outposts/src/main/resources/codegen-resources/customization.config b/services/s3outposts/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/s3outposts/src/main/resources/codegen-resources/customization.config +++ b/services/s3outposts/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/s3tables/pom.xml b/services/s3tables/pom.xml index a10eeea0c4e7..98d64b6c316a 100644 --- a/services/s3tables/pom.xml +++ b/services/s3tables/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT s3tables AWS Java SDK :: Services :: S3 Tables diff --git a/services/s3tables/src/main/resources/codegen-resources/customization.config b/services/s3tables/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/s3tables/src/main/resources/codegen-resources/customization.config +++ b/services/s3tables/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/s3tables/src/main/resources/codegen-resources/service-2.json b/services/s3tables/src/main/resources/codegen-resources/service-2.json index a60c1404d504..9683f46bab6e 100644 --- a/services/s3tables/src/main/resources/codegen-resources/service-2.json +++ b/services/s3tables/src/main/resources/codegen-resources/service-2.json @@ -49,7 +49,7 @@ 
{"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

        Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

        Permissions
        • You must have the s3tables:CreateTable permission to use this operation.

        • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

        • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

        Additionally,

        " + "documentation":"

        Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

        Permissions
        • You must have the s3tables:CreateTable permission to use this operation.

        • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

        • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

        Additionally, If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

        " }, "CreateTableBucket":{ "name":"CreateTableBucket", @@ -208,7 +208,7 @@ "name":"GetTable", "http":{ "method":"GET", - "requestUri":"/tables/{tableBucketARN}/{namespace}/{name}", + "requestUri":"/get-table", "responseCode":200 }, "input":{"shape":"GetTableRequest"}, @@ -337,7 +337,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

        Gets details about the maintenance configuration of a table. For more information, see S3 Tables maintenance in the Amazon Simple Storage Service User Guide.

        Permissions

        You must have the s3tables:GetTableMaintenanceConfiguration permission to use this operation.

        " + "documentation":"

        Gets details about the maintenance configuration of a table. For more information, see S3 Tables maintenance in the Amazon Simple Storage Service User Guide.

        Permissions
        • You must have the s3tables:GetTableMaintenanceConfiguration permission to use this operation.

        • You must have the s3tables:GetTableData permission to set the compaction strategy to sort or z-order.

        " }, "GetTableMaintenanceJobStatus":{ "name":"GetTableMaintenanceJobStatus", @@ -471,7 +471,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

        Sets the encryption configuration for a table bucket.

        Permissions

        You must have the s3tables:PutTableBucketEncryption permission to use this operation.

        If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption

        ", + "documentation":"

        Sets the encryption configuration for a table bucket.

        Permissions

        You must have the s3tables:PutTableBucketEncryption permission to use this operation.

        If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption in the Amazon Simple Storage Service User Guide.

        ", "idempotent":true }, "PutTableBucketMaintenanceConfiguration":{ @@ -1179,7 +1179,7 @@ }, "namespace":{ "shape":"NamespaceName", - "documentation":"

        The name of the namespace the table is associated with.

        </p> 
        ", + "documentation":"

        The name of the namespace the table is associated with.

        ", "location":"uri", "locationName":"namespace" }, @@ -1297,29 +1297,30 @@ }, "GetTableRequest":{ "type":"structure", - "required":[ - "tableBucketARN", - "namespace", - "name" - ], "members":{ "tableBucketARN":{ "shape":"TableBucketARN", "documentation":"

        The Amazon Resource Name (ARN) of the table bucket associated with the table.

        ", - "location":"uri", + "location":"querystring", "locationName":"tableBucketARN" }, "namespace":{ "shape":"NamespaceName", "documentation":"

        The name of the namespace the table is associated with.

        ", - "location":"uri", + "location":"querystring", "locationName":"namespace" }, "name":{ "shape":"TableName", "documentation":"

        The name of the table.

        ", - "location":"uri", + "location":"querystring", "locationName":"name" + }, + "tableArn":{ + "shape":"TableARN", + "documentation":"

        The Amazon Resource Name (ARN) of the table.

        ", + "location":"querystring", + "locationName":"tableArn" } } }, @@ -1412,10 +1413,23 @@ "targetFileSizeMB":{ "shape":"PositiveInteger", "documentation":"

        The target file size for the table in MB.

        " + }, + "strategy":{ + "shape":"IcebergCompactionStrategy", + "documentation":"

        The compaction strategy to use for the table. This determines how files are selected and combined during compaction operations.

        " } }, "documentation":"

        Contains details about the compaction settings for an Iceberg table.

        " }, + "IcebergCompactionStrategy":{ + "type":"string", + "enum":[ + "auto", + "binpack", + "sort", + "z-order" + ] + }, "IcebergMetadata":{ "type":"structure", "required":["schema"], diff --git a/services/sagemaker/pom.xml b/services/sagemaker/pom.xml index 735bb6c418e8..f6a8abd5faa3 100644 --- a/services/sagemaker/pom.xml +++ b/services/sagemaker/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 sagemaker diff --git a/services/sagemaker/src/main/resources/codegen-resources/customization.config b/services/sagemaker/src/main/resources/codegen-resources/customization.config index 88956e25b94a..5d63887a8f3e 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/customization.config +++ b/services/sagemaker/src/main/resources/codegen-resources/customization.config @@ -21,6 +21,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemaker/src/main/resources/codegen-resources/service-2.json b/services/sagemaker/src/main/resources/codegen-resources/service-2.json index 46fad457f98a..f08717507018 100644 --- a/services/sagemaker/src/main/resources/codegen-resources/service-2.json +++ b/services/sagemaker/src/main/resources/codegen-resources/service-2.json @@ -2,6 +2,7 @@ "version":"2.0", "metadata":{ "apiVersion":"2017-07-24", + "auth":["aws.auth#sigv4"], "endpointPrefix":"api.sagemaker", "jsonVersion":"1.1", "protocol":"json", @@ -12,8 +13,7 @@ "signatureVersion":"v4", "signingName":"sagemaker", "targetPrefix":"SageMaker", - "uid":"sagemaker-2017-07-24", - "auth":["aws.auth#sigv4"] + "uid":"sagemaker-2017-07-24" }, "operations":{ "AddAssociation":{ @@ -109,8 +109,8 @@ "input":{"shape":"CreateAppRequest"}, "output":{"shape":"CreateAppResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + 
{"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker AI upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.

        " }, @@ -177,8 +177,8 @@ "input":{"shape":"CreateClusterRequest"}, "output":{"shape":"CreateClusterResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing persistent clusters for developing large machine learning models, such as large language models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the Amazon SageMaker Developer Guide.

        " }, @@ -191,8 +191,8 @@ "input":{"shape":"CreateClusterSchedulerConfigRequest"}, "output":{"shape":"CreateClusterSchedulerConfigResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Create cluster policy configuration. This policy is used for task prioritization and fair-share allocation of idle compute. This helps prioritize critical workloads and distributes idle compute across entities.

        " }, @@ -229,8 +229,8 @@ "input":{"shape":"CreateComputeQuotaRequest"}, "output":{"shape":"CreateComputeQuotaResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Create compute allocation definition. This defines how compute is allocated, shared, and borrowed for specified entities. Specifically, how to lend and borrow idle compute and assign a fair-share weight to the specified entities.

        " }, @@ -256,8 +256,8 @@ "input":{"shape":"CreateDataQualityJobDefinitionRequest"}, "output":{"shape":"CreateDataQualityJobDefinitionResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor.

        " }, @@ -283,8 +283,8 @@ "input":{"shape":"CreateDomainRequest"}, "output":{"shape":"CreateDomainResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other.

        EFS storage

        When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

        SageMaker AI uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

        VPC configuration

        All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available:

        • PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker AI, which allows internet access. This is the default value.

        • VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway.

          When internet access is disabled, you won't be able to run an Amazon SageMaker AI Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker AI API and runtime or a NAT gateway and your security groups allow outbound connections.

        NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an Amazon SageMaker AI Studio app successfully.

        For more information, see Connect Amazon SageMaker AI Studio Notebooks to Resources in a VPC.

        " }, @@ -387,8 +387,8 @@ "input":{"shape":"CreateFlowDefinitionRequest"}, "output":{"shape":"CreateFlowDefinitionResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a flow definition.

        " }, @@ -430,8 +430,8 @@ "input":{"shape":"CreateHumanTaskUiRequest"}, "output":{"shape":"CreateHumanTaskUiResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area.

        " }, @@ -472,9 +472,9 @@ "input":{"shape":"CreateImageVersionRequest"}, "output":{"shape":"CreateImageVersionResponse"}, "errors":[ + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a version of the SageMaker AI image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage.

        " }, @@ -568,8 +568,8 @@ "input":{"shape":"CreateModelBiasJobDefinitionRequest"}, "output":{"shape":"CreateModelBiasJobDefinitionResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates the definition for a model bias job.

        " }, @@ -582,8 +582,8 @@ "input":{"shape":"CreateModelCardRequest"}, "output":{"shape":"CreateModelCardResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates an Amazon SageMaker Model Card.

        For information about how to use model cards, see Amazon SageMaker Model Card.

        " }, @@ -596,9 +596,9 @@ "input":{"shape":"CreateModelCardExportJobRequest"}, "output":{"shape":"CreateModelCardExportJobResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates an Amazon SageMaker Model Card export job.

        " }, @@ -611,8 +611,8 @@ "input":{"shape":"CreateModelExplainabilityJobDefinitionRequest"}, "output":{"shape":"CreateModelExplainabilityJobDefinitionResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates the definition for a model explainability job.

        " }, @@ -652,8 +652,8 @@ "input":{"shape":"CreateModelQualityJobDefinitionRequest"}, "output":{"shape":"CreateModelQualityJobDefinitionResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker AI Model Monitor.

        " }, @@ -666,8 +666,8 @@ "input":{"shape":"CreateMonitoringScheduleRequest"}, "output":{"shape":"CreateMonitoringScheduleResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a schedule that regularly starts Amazon SageMaker AI Processing Jobs to monitor the data captured for an Amazon SageMaker AI Endpoint.

        " }, @@ -720,8 +720,8 @@ "input":{"shape":"CreatePartnerAppRequest"}, "output":{"shape":"CreatePartnerAppResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates an Amazon SageMaker Partner AI App.

        " }, @@ -747,9 +747,9 @@ "input":{"shape":"CreatePipelineRequest"}, "output":{"shape":"CreatePipelineResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a pipeline using a JSON pipeline definition.

        " }, @@ -798,9 +798,9 @@ "input":{"shape":"CreateProcessingJobRequest"}, "output":{"shape":"CreateProcessingJobResponse"}, "errors":[ + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a processing job.

        " }, @@ -826,8 +826,8 @@ "input":{"shape":"CreateSpaceRequest"}, "output":{"shape":"CreateSpaceResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a private space or a space used for real time collaboration in a domain.

        " }, @@ -853,9 +853,9 @@ "input":{"shape":"CreateTrainingJobRequest"}, "output":{"shape":"CreateTrainingJobResponse"}, "errors":[ + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Starts a model training job. After training completes, SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

        If you choose to host your model using SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than SageMaker, provided that you know how to use them for inference.

        In the request body, you provide the following:

        • AlgorithmSpecification - Identifies the training algorithm to use.

        • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by SageMaker, see Algorithms.

          Do not include any security-sensitive information including account access IDs, secrets, or tokens in any hyperparameter fields. As part of the shared responsibility model, you are responsible for any potential exposure, unauthorized access, or compromise of your sensitive data if caused by security-sensitive information included in the request hyperparameter variable or plain text fields.

        • InputDataConfig - Describes the input required by the training job and the Amazon S3, EFS, or FSx location where it is stored.

        • OutputDataConfig - Identifies the Amazon S3 bucket where you want SageMaker to save the results of model training.

        • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

        • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

        • RoleArn - The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that SageMaker can successfully complete model training.

        • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long a managed spot training job has to complete.

        • Environment - The environment variables to set in the Docker container.

          Do not include any security-sensitive information including account access IDs, secrets, or tokens in any environment fields. As part of the shared responsibility model, you are responsible for any potential exposure, unauthorized access, or compromise of your sensitive data if caused by security-sensitive information included in the request environment variable or plain text fields.

        • RetryStrategy - The number of times to retry the job when the job fails due to an InternalServerError.

        For more information about SageMaker, see How It Works.

        " }, @@ -868,9 +868,9 @@ "input":{"shape":"CreateTrainingPlanRequest"}, "output":{"shape":"CreateTrainingPlanResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a new training plan in SageMaker to reserve compute capacity.

        Amazon SageMaker Training Plan is a capability within SageMaker that allows customers to reserve and manage GPU capacity for large-scale AI model training. It provides a way to secure predictable access to computational resources within specific timelines and budgets, without the need to manage underlying infrastructure.

        How it works

        Plans can be created for specific resources such as SageMaker Training Jobs or SageMaker HyperPod clusters, automatically provisioning resources, setting up infrastructure, executing workloads, and handling infrastructure failures.

        Plan creation workflow

        • Users search for available plan offerings based on their requirements (e.g., instance type, count, start time, duration) using the SearchTrainingPlanOfferings API operation.

        • They create a plan that best matches their needs using the ID of the plan offering they want to use.

        • After successful upfront payment, the plan's status becomes Scheduled.

        • The plan can be used to:

          • Queue training jobs.

          • Allocate to an instance group of a SageMaker HyperPod cluster.

        • When the plan start date arrives, it becomes Active. Based on available reserved capacity:

          • Training jobs are launched.

          • Instance groups are provisioned.

        Plan composition

        A plan can consist of one or more Reserved Capacities, each defined by a specific instance type, quantity, Availability Zone, duration, and start and end times. For more information about Reserved Capacity, see ReservedCapacitySummary.

        " }, @@ -883,9 +883,9 @@ "input":{"shape":"CreateTransformJobRequest"}, "output":{"shape":"CreateTransformJobResponse"}, "errors":[ + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

        To perform batch transformations, you create a transform job and use the data that you have readily available.

        In the request body, you provide the following:

        • TransformJobName - Identifies the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account.

        • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services account. For information on creating a model, see CreateModel.

        • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

        • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

        • TransformResources - Identifies the ML compute instances and AMI image versions for the transform job.

        For more information about how batch transformation works, see Batch Transform.

        " }, @@ -925,8 +925,8 @@ "input":{"shape":"CreateUserProfileRequest"}, "output":{"shape":"CreateUserProfileResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceInUse"} + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity Center, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System home directory.

        " }, @@ -987,8 +987,8 @@ }, "input":{"shape":"DeleteAppRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Used to stop and delete an app.

        " }, @@ -1039,8 +1039,8 @@ "input":{"shape":"DeleteClusterRequest"}, "output":{"shape":"DeleteClusterResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Delete a SageMaker HyperPod cluster.

        " }, @@ -1134,8 +1134,8 @@ }, "input":{"shape":"DeleteDomainRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Used to delete a domain. If you onboarded with IAM mode, you will need to delete your domain to onboard again using IAM Identity Center. Use with caution. All of the members of the domain will lose access to their EFS volume, including data, notebooks, and other artifacts.

        " }, @@ -1215,8 +1215,8 @@ "input":{"shape":"DeleteFlowDefinitionRequest"}, "output":{"shape":"DeleteFlowDefinitionResponse"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Deletes the specified flow definition.

        " }, @@ -1228,8 +1228,8 @@ }, "input":{"shape":"DeleteHubRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Delete a hub.

        " }, @@ -1241,8 +1241,8 @@ }, "input":{"shape":"DeleteHubContentRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Delete the contents of a hub.

        " }, @@ -1289,8 +1289,8 @@ "input":{"shape":"DeleteImageRequest"}, "output":{"shape":"DeleteImageResponse"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Deletes a SageMaker AI image and all versions of the image. The container images aren't deleted.

        " }, @@ -1303,8 +1303,8 @@ "input":{"shape":"DeleteImageVersionRequest"}, "output":{"shape":"DeleteImageVersionResponse"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Deletes a version of a SageMaker AI image. The container image the version represents isn't deleted.

        " }, @@ -1373,8 +1373,8 @@ }, "input":{"shape":"DeleteModelCardRequest"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Deletes an Amazon SageMaker Model Card.

        " }, @@ -1486,8 +1486,8 @@ "input":{"shape":"DeletePartnerAppRequest"}, "output":{"shape":"DeletePartnerAppResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Deletes a SageMaker Partner AI App.

        " }, @@ -1500,8 +1500,8 @@ "input":{"shape":"DeletePipelineRequest"}, "output":{"shape":"DeletePipelineResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Deletes a pipeline if there are no running instances of the pipeline. To delete a pipeline, you must stop all running instances of the pipeline using the StopPipelineExecution API. When you delete a pipeline, all instances of the pipeline are deleted.

        " }, @@ -1525,8 +1525,8 @@ }, "input":{"shape":"DeleteSpaceRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Used to delete a space.

        " }, @@ -1587,8 +1587,8 @@ }, "input":{"shape":"DeleteUserProfileRequest"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Deletes a user profile. When a user profile is deleted, the user loses access to their EFS volume, including data, notebooks, and other artifacts.

        " }, @@ -2562,9 +2562,9 @@ "input":{"shape":"ImportHubContentRequest"}, "output":{"shape":"ImportHubContentResponse"}, "errors":[ + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Import hub content.

        " }, @@ -3504,9 +3504,9 @@ "input":{"shape":"RetryPipelineExecutionRequest"}, "output":{"shape":"RetryPipelineExecutionResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Retry the execution of the pipeline.

        " }, @@ -3542,9 +3542,9 @@ "input":{"shape":"SendPipelineExecutionStepFailureRequest"}, "output":{"shape":"SendPipelineExecutionStepFailureResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Notifies the pipeline that the execution of a callback step failed, along with a message describing why. When a callback step is run, the pipeline generates a callback token and includes the token in a message sent to Amazon Simple Queue Service (Amazon SQS).

        " }, @@ -3557,9 +3557,9 @@ "input":{"shape":"SendPipelineExecutionStepSuccessRequest"}, "output":{"shape":"SendPipelineExecutionStepSuccessResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Notifies the pipeline that the execution of a callback step succeeded and provides a list of the step's output parameters. When a callback step is run, the pipeline generates a callback token and includes the token in a message sent to Amazon Simple Queue Service (Amazon SQS).

        " }, @@ -3595,8 +3595,8 @@ "input":{"shape":"StartMlflowTrackingServerRequest"}, "output":{"shape":"StartMlflowTrackingServerResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Programmatically start an MLflow Tracking Server.

        " }, @@ -3633,9 +3633,9 @@ "input":{"shape":"StartPipelineExecutionRequest"}, "output":{"shape":"StartPipelineExecutionResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Starts a pipeline execution.

        " }, @@ -3740,8 +3740,8 @@ "input":{"shape":"StopMlflowTrackingServerRequest"}, "output":{"shape":"StopMlflowTrackingServerResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Programmatically stop an MLflow Tracking Server.

        " }, @@ -3787,8 +3787,8 @@ "input":{"shape":"StopPipelineExecutionRequest"}, "output":{"shape":"StopPipelineExecutionResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Stops a pipeline execution.

        Callback Step

        A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".

        You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure.

        Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

        Lambda Step

        A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped. If the timeout is hit the pipeline execution status is Failed.

        " }, @@ -3878,9 +3878,9 @@ "input":{"shape":"UpdateClusterRequest"}, "output":{"shape":"UpdateClusterResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates a SageMaker HyperPod cluster.

        " }, @@ -3893,9 +3893,9 @@ "input":{"shape":"UpdateClusterSchedulerConfigRequest"}, "output":{"shape":"UpdateClusterSchedulerConfigResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Update the cluster policy configuration.

        " }, @@ -3908,8 +3908,8 @@ "input":{"shape":"UpdateClusterSoftwareRequest"}, "output":{"shape":"UpdateClusterSoftwareResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.

        The UpgradeClusterSoftware API call may impact your SageMaker HyperPod cluster uptime and availability. Plan accordingly to mitigate potential disruptions to your workloads.

        " }, @@ -3935,9 +3935,9 @@ "input":{"shape":"UpdateComputeQuotaRequest"}, "output":{"shape":"UpdateComputeQuotaResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Update the compute allocation definition.

        " }, @@ -3985,9 +3985,9 @@ "input":{"shape":"UpdateDomainRequest"}, "output":{"shape":"UpdateDomainResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates the default settings for new user profiles in the domain.

        " }, @@ -4107,8 +4107,8 @@ "input":{"shape":"UpdateImageRequest"}, "output":{"shape":"UpdateImageResponse"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Updates the properties of a SageMaker AI image. To change the image's tags, use the AddTags and DeleteTags APIs.

        " }, @@ -4121,8 +4121,8 @@ "input":{"shape":"UpdateImageVersionRequest"}, "output":{"shape":"UpdateImageVersionResponse"}, "errors":[ - {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceInUse"} ], "documentation":"

        Updates the properties of a SageMaker AI image version.

        " }, @@ -4175,9 +4175,9 @@ "input":{"shape":"UpdateMlflowTrackingServerRequest"}, "output":{"shape":"UpdateMlflowTrackingServerResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates properties of an existing MLflow Tracking Server.

        " }, @@ -4190,9 +4190,9 @@ "input":{"shape":"UpdateModelCardRequest"}, "output":{"shape":"UpdateModelCardResponse"}, "errors":[ + {"shape":"ConflictException"}, {"shape":"ResourceNotFound"}, - {"shape":"ResourceLimitExceeded"}, - {"shape":"ConflictException"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Update an Amazon SageMaker Model Card.

        You cannot update both model card content and model card status in a single call.

        " }, @@ -4218,8 +4218,8 @@ "input":{"shape":"UpdateMonitoringAlertRequest"}, "output":{"shape":"UpdateMonitoringAlertResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Update the parameters of a model monitor alert.

        " }, @@ -4232,8 +4232,8 @@ "input":{"shape":"UpdateMonitoringScheduleRequest"}, "output":{"shape":"UpdateMonitoringScheduleResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceNotFound"}, + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates a previously created schedule.

        " }, @@ -4272,8 +4272,8 @@ "input":{"shape":"UpdatePartnerAppRequest"}, "output":{"shape":"UpdatePartnerAppResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Updates all of the SageMaker Partner AI Apps in an account.

        " }, @@ -4286,8 +4286,8 @@ "input":{"shape":"UpdatePipelineRequest"}, "output":{"shape":"UpdatePipelineResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Updates a pipeline.

        " }, @@ -4300,8 +4300,8 @@ "input":{"shape":"UpdatePipelineExecutionRequest"}, "output":{"shape":"UpdatePipelineExecutionResponse"}, "errors":[ - {"shape":"ResourceNotFound"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} ], "documentation":"

        Updates a pipeline execution.

        " }, @@ -4327,9 +4327,9 @@ "input":{"shape":"UpdateSpaceRequest"}, "output":{"shape":"UpdateSpaceResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates the settings of a space.

        You can't edit the app type of a space in the SpaceSettings.

        " }, @@ -4384,9 +4384,9 @@ "input":{"shape":"UpdateUserProfileRequest"}, "output":{"shape":"UpdateUserProfileResponse"}, "errors":[ - {"shape":"ResourceLimitExceeded"}, + {"shape":"ResourceNotFound"}, {"shape":"ResourceInUse"}, - {"shape":"ResourceNotFound"} + {"shape":"ResourceLimitExceeded"} ], "documentation":"

        Updates a user profile.

        " }, @@ -4421,6 +4421,7 @@ "Accept":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "AcceptEula":{"type":"boolean"}, @@ -4428,11 +4429,12 @@ "type":"string", "max":12, "min":12, - "pattern":"^\\d+$" + "pattern":"\\d+" }, "ActionArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:action/.*" }, "ActionSource":{ @@ -4573,7 +4575,8 @@ "AdditionalCodeRepositoryNamesOrUrls":{ "type":"list", "member":{"shape":"CodeRepositoryNameOrUrl"}, - "max":3 + "max":3, + "min":0 }, "AdditionalInferenceSpecificationDefinition":{ "type":"structure", @@ -4692,7 +4695,8 @@ }, "AgentCount":{ "shape":"Long", - "documentation":"

        The number of Edge Manager agents.

        " + "documentation":"

        The number of Edge Manager agents.

        ", + "box":true } }, "documentation":"

        Edge Manager agent version.

        " @@ -4749,17 +4753,18 @@ "type":"string", "max":255, "min":1, - "pattern":"^(?!\\s*$).+" + "pattern":"(?!\\s*$).+" }, "AlgorithmArn":{ "type":"string", "max":2048, "min":1, - "pattern":"^arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:algorithm/[\\S]{1,2048}$" + "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:algorithm/[\\S]{1,2048}" }, "AlgorithmImage":{ "type":"string", "max":255, + "min":0, "pattern":".*" }, "AlgorithmSortBy":{ @@ -4788,7 +4793,8 @@ }, "EnableSageMakerMetricsTimeSeries":{ "shape":"Boolean", - "documentation":"

        To generate and save time-series metrics during training, set to true. The default is false and time-series metrics aren't generated except in the following cases:

        " + "documentation":"

        To generate and save time-series metrics during training, set to true. The default is false and time-series metrics aren't generated except in the following cases:

        ", + "box":true }, "ContainerEntrypoint":{ "shape":"TrainingContainerEntrypoint", @@ -4965,6 +4971,7 @@ "AppArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:app/.*" }, "AppDetails":{ @@ -5005,6 +5012,7 @@ "AppImageConfigArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:app-image-config/.*" }, "AppImageConfigDetails":{ @@ -5048,7 +5056,8 @@ "AppImageConfigName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "AppImageConfigSortKey":{ "type":"string", @@ -5243,7 +5252,8 @@ "AppName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "AppNetworkAccessType":{ "type":"string", @@ -5309,32 +5319,36 @@ "ApprovalDescription":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "ArnOrName":{ "type":"string", "max":170, "min":1, - "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-]*\\/)?([a-zA-Z0-9]([a-zA-Z0-9-]){0,62})(?The configuration for using EMR Serverless to run the AutoML job V2.

        To allow your AutoML job V2 to automatically initiate a remote job on EMR Serverless when additional compute resources are needed to process large datasets, you need to provide an EmrServerlessComputeConfig object, which includes an ExecutionRoleARN attribute, to the AutoMLComputeConfig of the AutoML job V2 input request.

        By seamlessly transitioning to EMR Serverless when required, the AutoML job can handle datasets that would otherwise exceed the initially provisioned resources, without any manual intervention from you.

        EMR Serverless is available for the tabular and time series problem types. We recommend setting up this option for tabular datasets larger than 5 GB and time series datasets larger than 30 GB.

        " } }, - "documentation":"

        This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

        Specifies the compute configuration for an AutoML job V2.

        " + "documentation":"

        This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

        Specifies the compute configuration for an AutoML job V2.

        " }, "AutoMLContainerDefinition":{ "type":"structure", @@ -5919,7 +5941,8 @@ "AutoMLContainerDefinitions":{ "type":"list", "member":{"shape":"AutoMLContainerDefinition"}, - "max":5 + "max":5, + "min":0 }, "AutoMLDataSource":{ "type":"structure", @@ -5944,7 +5967,8 @@ }, "AutoMLFailureReason":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "AutoMLInferenceContainerDefinitions":{ "type":"map", @@ -5957,7 +5981,8 @@ "documentation":"

        Information about the recommended inference container definitions.

        " }, "documentation":"

        The mapping of all supported processing unit (CPU, GPU, etc...) to inference container definitions for the candidate. This field is populated for the V2 API only (for example, for jobs created by calling CreateAutoMLJobV2).

        ", - "max":2 + "max":2, + "min":0 }, "AutoMLInputDataConfig":{ "type":"list", @@ -6061,7 +6086,7 @@ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" }, "AutoMLJobObjective":{ "type":"structure", @@ -6253,6 +6278,7 @@ "AutoMLNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9\\-]+" }, "AutoMLOutputDataConfig":{ @@ -6392,7 +6418,8 @@ }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        Whether to use traffic encryption between the container layers.

        " + "documentation":"

        Whether to use traffic encryption between the container layers.

        ", + "box":true }, "VpcConfig":{ "shape":"VpcConfig", @@ -6487,6 +6514,7 @@ }, "AvailableInstanceCount":{ "type":"integer", + "box":true, "min":0 }, "AwsManagedHumanLoopRequestSource":{ @@ -6504,7 +6532,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "BatchDataCaptureConfig":{ "type":"structure", @@ -6520,7 +6548,8 @@ }, "GenerateInferenceId":{ "shape":"Boolean", - "documentation":"

        Flag that indicates whether to append inference id to the output.

        " + "documentation":"

        Flag that indicates whether to append inference id to the output.

        ", + "box":true } }, "documentation":"

        Configuration to control how SageMaker captures inference data for batch transform jobs.

        " @@ -6776,11 +6805,13 @@ }, "BillableTimeInSeconds":{ "type":"integer", + "box":true, "min":1 }, "BlockedReason":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "BlueGreenUpdatePolicy":{ "type":"structure", @@ -6811,6 +6842,7 @@ }, "BorrowLimit":{ "type":"integer", + "box":true, "max":500, "min":1 }, @@ -6858,7 +6890,7 @@ "type":"string", "max":10, "min":10, - "pattern":"^[a-zA-Z0-9]+$" + "pattern":"[a-zA-Z0-9]+" }, "CandidateArtifactLocations":{ "type":"structure", @@ -6991,6 +7023,10 @@ }, "documentation":"

        The SageMaker Canvas application settings.

        " }, + "CapacityReservationPreference":{ + "type":"string", + "enum":["capacity-reservations-only"] + }, "CapacitySize":{ "type":"structure", "required":[ @@ -7036,10 +7072,12 @@ }, "CapacitySizeValue":{ "type":"integer", + "box":true, "min":1 }, "CapacityUnit":{ "type":"integer", + "box":true, "max":10000000, "min":0 }, @@ -7166,6 +7204,208 @@ "min":0 }, "CertifyForMarketplace":{"type":"boolean"}, + "CfnCreateTemplateProvider":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateURL" + ], + "members":{ + "TemplateName":{ + "shape":"CfnTemplateName", + "documentation":"

        A unique identifier for the template within the project.

        " + }, + "TemplateURL":{ + "shape":"CfnTemplateURL", + "documentation":"

        The Amazon S3 URL of the CloudFormation template.

        " + }, + "RoleARN":{ + "shape":"RoleArn", + "documentation":"

        The IAM role that CloudFormation assumes when creating the stack.

        " + }, + "Parameters":{ + "shape":"CfnStackCreateParameters", + "documentation":"

        An array of CloudFormation stack parameters.

        " + } + }, + "documentation":"

        The CloudFormation template provider configuration for creating infrastructure resources.

        " + }, + "CfnStackCreateParameter":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"CfnStackParameterKey", + "documentation":"

        The name of the CloudFormation parameter.

        " + }, + "Value":{ + "shape":"CfnStackParameterValue", + "documentation":"

        The value of the CloudFormation parameter.

        " + } + }, + "documentation":"

        A key-value pair that represents a parameter for the CloudFormation stack.

        " + }, + "CfnStackCreateParameters":{ + "type":"list", + "member":{"shape":"CfnStackCreateParameter"}, + "max":180, + "min":0 + }, + "CfnStackDetail":{ + "type":"structure", + "required":["StatusMessage"], + "members":{ + "Name":{ + "shape":"CfnStackName", + "documentation":"

        The name of the CloudFormation stack.

        " + }, + "Id":{ + "shape":"CfnStackId", + "documentation":"

        The unique identifier of the CloudFormation stack.

        " + }, + "StatusMessage":{ + "shape":"CfnStackStatusMessage", + "documentation":"

        A human-readable message about the stack's current status.

        " + } + }, + "documentation":"

        Details about the CloudFormation stack.

        " + }, + "CfnStackId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"(?=.{1,256}$)arn:aws[a-z\\-]*:cloudformation:[a-z0-9\\-]*:[0-9]{12}:stack/[a-zA-Z][a-zA-Z0-9-]{0,127}/.*" + }, + "CfnStackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z][A-Za-z0-9-]{0,127}" + }, + "CfnStackParameter":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"CfnStackParameterKey", + "documentation":"

        The name of the CloudFormation parameter.

        " + }, + "Value":{ + "shape":"CfnStackParameterValue", + "documentation":"

        The value of the CloudFormation parameter.

        " + } + }, + "documentation":"

        A key-value pair representing a parameter used in the CloudFormation stack.

        " + }, + "CfnStackParameterKey":{ + "type":"string", + "max":255, + "min":1, + "pattern":".{1,255}" + }, + "CfnStackParameterValue":{ + "type":"string", + "max":4096, + "min":0, + "pattern":".{0,4096}" + }, + "CfnStackParameters":{ + "type":"list", + "member":{"shape":"CfnStackParameter"}, + "max":180, + "min":0 + }, + "CfnStackStatusMessage":{ + "type":"string", + "max":4096, + "min":1, + "pattern":".{1,4096}" + }, + "CfnStackUpdateParameter":{ + "type":"structure", + "required":["Key"], + "members":{ + "Key":{ + "shape":"CfnStackParameterKey", + "documentation":"

        The name of the CloudFormation parameter.

        " + }, + "Value":{ + "shape":"CfnStackParameterValue", + "documentation":"

        The value of the CloudFormation parameter.

        " + } + }, + "documentation":"

        A key-value pair representing a parameter used in the CloudFormation stack.

        " + }, + "CfnStackUpdateParameters":{ + "type":"list", + "member":{"shape":"CfnStackUpdateParameter"}, + "max":180, + "min":0 + }, + "CfnTemplateName":{ + "type":"string", + "max":32, + "min":1, + "pattern":"(?=.{1,32}$)[a-zA-Z0-9](-*[a-zA-Z0-9])*" + }, + "CfnTemplateProviderDetail":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateURL" + ], + "members":{ + "TemplateName":{ + "shape":"CfnTemplateName", + "documentation":"

        The unique identifier of the template within the project.

        " + }, + "TemplateURL":{ + "shape":"CfnTemplateURL", + "documentation":"

        The Amazon S3 URL of the CloudFormation template.

        " + }, + "RoleARN":{ + "shape":"RoleArn", + "documentation":"

        The IAM role used by CloudFormation to create the stack.

        " + }, + "Parameters":{ + "shape":"CfnStackParameters", + "documentation":"

        An array of CloudFormation stack parameters.

        " + }, + "StackDetail":{ + "shape":"CfnStackDetail", + "documentation":"

        Information about the CloudFormation stack created by the template provider.

        " + } + }, + "documentation":"

        Details about a CloudFormation template provider configuration and associated provisioning information.

        " + }, + "CfnTemplateURL":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"(?=.{1,1024}$)(https)://([^/]+)/(.+)" + }, + "CfnUpdateTemplateProvider":{ + "type":"structure", + "required":[ + "TemplateName", + "TemplateURL" + ], + "members":{ + "TemplateName":{ + "shape":"CfnTemplateName", + "documentation":"

        The unique identifier of the template to update within the project.

        " + }, + "TemplateURL":{ + "shape":"CfnTemplateURL", + "documentation":"

        The Amazon S3 URL of the CloudFormation template.

        " + }, + "Parameters":{ + "shape":"CfnStackUpdateParameters", + "documentation":"

        An array of CloudFormation stack parameters.

        " + } + }, + "documentation":"

        Contains configuration details for updating an existing CloudFormation template provider in the project.

        " + }, "Channel":{ "type":"structure", "required":[ @@ -7228,7 +7468,8 @@ }, "IsRequired":{ "shape":"Boolean", - "documentation":"

        Indicates whether the channel is required by the algorithm.

        " + "documentation":"

        Indicates whether the channel is required by the algorithm.

        ", + "box":true }, "SupportedContentTypes":{ "shape":"ContentTypes", @@ -7305,11 +7546,13 @@ }, "SkipCheck":{ "shape":"Boolean", - "documentation":"

        This flag indicates if the drift check against the previous baseline will be skipped or not. If it is set to False, the previous baseline of the configured check type must be available.

        " + "documentation":"

        This flag indicates if the drift check against the previous baseline will be skipped or not. If it is set to False, the previous baseline of the configured check type must be available.

        ", + "box":true }, "RegisterNewBaseline":{ "shape":"Boolean", - "documentation":"

        This flag indicates if a newly calculated baseline can be accessed through step properties BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. If it is set to False, the previous baseline of the configured check type must also be available. These can be accessed through the BaselineUsedForDriftCheckConstraints property.

        " + "documentation":"

        This flag indicates if a newly calculated baseline can be accessed through step properties BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. If it is set to False, the previous baseline of the configured check type must also be available. These can be accessed through the BaselineUsedForDriftCheckConstraints property.

        ", + "box":true } }, "documentation":"

        The container for the metadata for the ClarifyCheck step. For more information, see the topic on ClarifyCheck step in the Amazon SageMaker Developer Guide.

        " @@ -7441,21 +7684,25 @@ }, "ClarifyLabelIndex":{ "type":"integer", + "box":true, "min":0 }, "ClarifyMaxPayloadInMB":{ "type":"integer", + "box":true, "max":25, "min":1 }, "ClarifyMaxRecordCount":{ "type":"integer", + "box":true, "min":1 }, "ClarifyMimeType":{ "type":"string", "max":255, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9+.])*" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9+.])*" }, "ClarifyProbabilityAttribute":{ "type":"string", @@ -7465,6 +7712,7 @@ }, "ClarifyProbabilityIndex":{ "type":"integer", + "box":true, "min":0 }, "ClarifyShapBaseline":{ @@ -7520,10 +7768,17 @@ }, "ClarifyShapNumberOfSamples":{ "type":"integer", + "box":true, "min":1 }, - "ClarifyShapSeed":{"type":"integer"}, - "ClarifyShapUseLogit":{"type":"boolean"}, + "ClarifyShapSeed":{ + "type":"integer", + "box":true + }, + "ClarifyShapUseLogit":{ + "type":"boolean", + "box":true + }, "ClarifyTextConfig":{ "type":"structure", "required":[ @@ -7632,20 +7887,21 @@ "type":"string", "max":36, "min":1, - "pattern":"^[a-zA-Z0-9-]+$" + "pattern":"[a-zA-Z0-9-]+" }, "ClusterArn":{ "type":"string", "max":256, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12}$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12}" }, "ClusterAvailabilityZone":{ "type":"string", - "pattern":"^[a-z]{2}-[a-z]+-\\d[a-z]$" + "pattern":"[a-z]{2}-[a-z]+-\\d[a-z]" }, "ClusterAvailabilityZoneId":{ "type":"string", - "pattern":"^[a-z]{3}\\d-az\\d$" + "pattern":"[a-z]{3}\\d-az\\d" }, "ClusterEbsVolumeConfig":{ "type":"structure", @@ -7660,11 +7916,13 @@ }, "ClusterEbsVolumeSizeInGB":{ "type":"integer", + "box":true, "max":16384, "min":1 }, "ClusterInstanceCount":{ "type":"integer", + "box":true, "max":6758, "min":0 }, @@ -7738,7 +7996,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, 
"ClusterInstanceGroupSpecification":{ "type":"structure", @@ -7863,7 +8121,8 @@ "ClusterInstanceStorageConfigs":{ "type":"list", "member":{"shape":"ClusterInstanceStorageConfig"}, - "max":1 + "max":1, + "min":0 }, "ClusterInstanceType":{ "type":"string", @@ -7926,6 +8185,7 @@ "ml.g6e.48xlarge", "ml.p5e.48xlarge", "ml.p5en.48xlarge", + "ml.p6-b200.48xlarge", "ml.trn2.48xlarge", "ml.c6i.large", "ml.c6i.xlarge", @@ -8003,18 +8263,19 @@ "type":"string", "max":128, "min":1, - "pattern":"^[\\S\\s]+$" + "pattern":"[\\S\\s]+" }, "ClusterName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "ClusterNameOrArn":{ "type":"string", "max":256, - "pattern":"^(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12})|([a-zA-Z0-9](-*[a-zA-Z0-9]){0,62})$" + "min":0, + "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12})|([a-zA-Z0-9](-*[a-zA-Z0-9]){0,62})" }, "ClusterNodeDetails":{ "type":"structure", @@ -8082,7 +8343,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" + "pattern":"i-[a-f0-9]{8}(?:[a-f0-9]{9})?" 
}, "ClusterNodeIds":{ "type":"list", @@ -8140,6 +8401,7 @@ }, "ClusterNonNegativeInstanceCount":{ "type":"integer", + "box":true, "min":0 }, "ClusterOrchestrator":{ @@ -8166,22 +8428,24 @@ }, "ClusterPrivateDnsHostname":{ "type":"string", - "pattern":"^ip-((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)-?\\b){4}\\..*$" + "pattern":"ip-((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)-?\\b){4}\\..*" }, "ClusterPrivatePrimaryIp":{ "type":"string", - "pattern":"^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" + "pattern":"((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}" }, "ClusterPrivatePrimaryIpv6":{"type":"string"}, "ClusterSchedulerConfigArn":{ "type":"string", "max":256, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:cluster-scheduler-config/[a-z0-9]{12}$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:cluster-scheduler-config/[a-z0-9]{12}" }, "ClusterSchedulerConfigId":{ "type":"string", "max":12, - "pattern":"^[a-z0-9]{12}$" + "min":0, + "pattern":"[a-z0-9]{12}" }, "ClusterSchedulerConfigSummary":{ "type":"structure", @@ -8203,7 +8467,8 @@ }, "ClusterSchedulerConfigVersion":{ "shape":"Integer", - "documentation":"

        Version of the cluster policy.

        " + "documentation":"

        Version of the cluster policy.

        ", + "box":true }, "Name":{ "shape":"EntityName", @@ -8236,7 +8501,7 @@ }, "ClusterSchedulerPriorityClassName":{ "type":"string", - "pattern":"^[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?$" + "pattern":"[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?" }, "ClusterSortBy":{ "type":"string", @@ -8295,6 +8560,7 @@ }, "ClusterThreadsPerCore":{ "type":"integer", + "box":true, "max":2, "min":1 }, @@ -8332,7 +8598,8 @@ "CodeRepositories":{ "type":"list", "member":{"shape":"CodeRepository"}, - "max":10 + "max":10, + "min":0 }, "CodeRepository":{ "type":"structure", @@ -8349,23 +8616,25 @@ "type":"string", "max":2048, "min":1, - "pattern":"^arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:code-repository/[\\S]{1,2048}$" + "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:code-repository/[\\S]{1,2048}" }, "CodeRepositoryContains":{ "type":"string", "max":1024, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "CodeRepositoryNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "CodeRepositoryNameOrUrl":{ "type":"string", "max":1024, "min":1, - "pattern":"^https://([^/]+)/?(.*)$|^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"https://([^/]+)/?(.*)$|^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "CodeRepositorySortBy":{ "type":"string", @@ -8526,6 +8795,7 @@ "CompilationJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:compilation-job/.*" }, "CompilationJobStatus":{ @@ -8626,7 +8896,8 @@ "ComputeQuotaArn":{ "type":"string", "max":2048, - "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:compute-quota/[a-z0-9]{12}$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:compute-quota/[a-z0-9]{12}" }, "ComputeQuotaConfig":{ "type":"structure", @@ -8648,7 +8919,7 @@ }, "ComputeQuotaId":{ "type":"string", - "pattern":"^[a-z0-9]{12}$" + "pattern":"[a-z0-9]{12}" }, "ComputeQuotaResourceConfig":{ "type":"structure", @@ -8699,7 +8970,8 @@ }, 
"ComputeQuotaVersion":{ "shape":"Integer", - "documentation":"

        Version of the compute allocation definition.

        " + "documentation":"

        Version of the compute allocation definition.

        ", + "box":true }, "Status":{ "shape":"SchedulerResourceStatus", @@ -8755,7 +9027,7 @@ }, "ComputeQuotaTargetTeamName":{ "type":"string", - "pattern":"^[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?$" + "pattern":"[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?" }, "ConditionOutcome":{ "type":"string", @@ -8783,6 +9055,7 @@ "ConfigValue":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ConflictException":{ @@ -8796,6 +9069,7 @@ "ContainerArgument":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ContainerArguments":{ @@ -8875,7 +9149,8 @@ "ContainerDefinitionList":{ "type":"list", "member":{"shape":"ContainerDefinition"}, - "max":15 + "max":15, + "min":0 }, "ContainerEntrypoint":{ "type":"list", @@ -8886,16 +9161,19 @@ "ContainerEntrypointString":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ContainerHostname":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ContainerImage":{ "type":"string", "max":255, + "min":0, "pattern":"[\\S]+" }, "ContainerMode":{ @@ -8915,7 +9193,8 @@ "ContentClassifiers":{ "type":"list", "member":{"shape":"ContentClassifier"}, - "max":256 + "max":256, + "min":0 }, "ContentColumn":{ "type":"string", @@ -8925,11 +9204,13 @@ "ContentDigest":{ "type":"string", "max":72, - "pattern":"^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" + "min":0, + "pattern":"[Ss][Hh][Aa]256:[0-9a-fA-F]{64}" }, "ContentType":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ContentTypes":{ @@ -8939,13 +9220,14 @@ "ContextArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:context/.*" }, "ContextName":{ "type":"string", "max":120, "min":1, - "pattern":"^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,119}" + "pattern":"[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,119}" }, "ContextNameOrArn":{ "type":"string", @@ -9150,7 +9432,8 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

        Whether to certify the algorithm so that it can be listed in Amazon Web Services Marketplace.

        " + "documentation":"

        Whether to certify the algorithm so that it can be listed in Amazon Web Services Marketplace.

        ", + "box":true }, "Tags":{ "shape":"TagList", @@ -9241,7 +9524,8 @@ }, "RecoveryMode":{ "shape":"Boolean", - "documentation":"

        Indicates whether the application is launched in recovery mode.

        " + "documentation":"

        Indicates whether the application is launched in recovery mode.

        ", + "box":true } } }, @@ -9332,7 +9616,8 @@ }, "GenerateCandidateDefinitionsOnly":{ "shape":"GenerateCandidateDefinitionsOnly", - "documentation":"

        Generates possible candidates without training the models. A candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

        " + "documentation":"

        Generates possible candidates without training the models. A candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

        ", + "box":true }, "Tags":{ "shape":"TagList", @@ -9422,10 +9707,7 @@ }, "CreateClusterRequest":{ "type":"structure", - "required":[ - "ClusterName", - "InstanceGroups" - ], + "required":["ClusterName"], "members":{ "ClusterName":{ "shape":"ClusterName", @@ -10008,7 +10290,8 @@ "VpcConfig":{"shape":"VpcConfig"}, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Sets whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers.

        " + "documentation":"

        Sets whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers.

        ", + "box":true } } }, @@ -10440,7 +10723,8 @@ }, "Horovod":{ "shape":"Horovod", - "documentation":"

        Indicates Horovod compatibility.

        " + "documentation":"

        Indicates Horovod compatibility.

        ", + "box":true }, "ReleaseNotes":{ "shape":"ReleaseNotes", @@ -10714,7 +10998,8 @@ }, "AutomaticModelRegistration":{ "shape":"Boolean", - "documentation":"

        Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False.

        " + "documentation":"

        Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False.

        ", + "box":true }, "WeeklyMaintenanceWindowStart":{ "shape":"WeeklyMaintenanceWindowStart", @@ -10803,7 +11088,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card to export. If a version is not provided, then the latest version of the model card is exported.

        " + "documentation":"

        The version of the model card to export. If a version is not provided, then the latest version of the model card is exported.

        ", + "box":true }, "ModelCardExportJobName":{ "shape":"EntityName", @@ -10953,7 +11239,8 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

        " + "documentation":"

        Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

        ", + "box":true } } }, @@ -11024,7 +11311,8 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

        Whether to certify the model package for listing on Amazon Web Services Marketplace.

        This parameter is optional for unversioned models, and does not apply to versioned models.

        " + "documentation":"

        Whether to certify the model package for listing on Amazon Web Services Marketplace.

        This parameter is optional for unversioned models, and does not apply to versioned models.

        ", + "box":true }, "Tags":{ "shape":"TagList", @@ -11431,7 +11719,8 @@ }, "EnableIamSessionBasedIdentity":{ "shape":"Boolean", - "documentation":"

        When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

        " + "documentation":"

        When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

        ", + "box":true }, "ClientToken":{ "shape":"ClientToken", @@ -11682,6 +11971,10 @@ "Tags":{ "shape":"TagList", "documentation":"

        An array of key-value pairs that you want to use to organize and track your Amazon Web Services resource costs. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

        " + }, + "TemplateProviders":{ + "shape":"CreateTemplateProviderList", + "documentation":"

        An array of template provider configurations for creating infrastructure resources for the project.

        " } } }, @@ -11783,6 +12076,22 @@ } } }, + "CreateTemplateProvider":{ + "type":"structure", + "members":{ + "CfnTemplateProvider":{ + "shape":"CfnCreateTemplateProvider", + "documentation":"

        The CloudFormation template provider configuration for creating infrastructure resources.

        " + } + }, + "documentation":"

        Contains configuration details for a template provider. Only one type of template provider can be specified.

        " + }, + "CreateTemplateProviderList":{ + "type":"list", + "member":{"shape":"CreateTemplateProvider"}, + "max":1, + "min":1 + }, "CreateTrainingJobRequest":{ "type":"structure", "required":[ @@ -11836,15 +12145,18 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        " + "documentation":"

        Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        ", + "box":true }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job.

        " + "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training. For more information, see Protect Communications Between ML Compute Instances in a Distributed Training Job.

        ", + "box":true }, "EnableManagedSpotTraining":{ "shape":"Boolean", - "documentation":"

        To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. this option is useful when training jobs can be interrupted and when there is flexibility when the training job is run.

        The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed.

        " + "documentation":"

        To train models using managed spot training, choose True. Managed spot training provides a fully managed and scalable infrastructure for training machine learning models. This option is useful when training jobs can be interrupted and when there is flexibility when the training job is run.

        The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be used as a starting point to train models incrementally. Amazon SageMaker provides metrics and logs in CloudWatch. They can be used to see when managed spot training jobs are running, interrupted, resumed, or completed.

        ", + "box":true }, "CheckpointConfig":{ "shape":"CheckpointConfig", @@ -12230,7 +12542,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9.])*" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9.])*" }, "CsvContentTypes":{ "type":"list", @@ -12272,12 +12584,14 @@ "CustomFileSystemConfigs":{ "type":"list", "member":{"shape":"CustomFileSystemConfig"}, - "max":10 + "max":10, + "min":0 }, "CustomFileSystems":{ "type":"list", "member":{"shape":"CustomFileSystem"}, - "max":5 + "max":5, + "min":0 }, "CustomImage":{ "type":"structure", @@ -12292,8 +12606,7 @@ }, "ImageVersionNumber":{ "shape":"ImageVersionNumber", - "documentation":"

        The version number of the CustomImage.

        ", - "box":true + "documentation":"

        The version number of the CustomImage.

        " }, "AppImageConfigName":{ "shape":"AppImageConfigName", @@ -12305,23 +12618,27 @@ "CustomImageContainerArguments":{ "type":"list", "member":{"shape":"NonEmptyString64"}, - "max":50 + "max":50, + "min":0 }, "CustomImageContainerEntrypoint":{ "type":"list", "member":{"shape":"NonEmptyString256"}, - "max":1 + "max":1, + "min":0 }, "CustomImageContainerEnvironmentVariables":{ "type":"map", "key":{"shape":"NonEmptyString256"}, "value":{"shape":"String256"}, - "max":25 + "max":25, + "min":0 }, "CustomImages":{ "type":"list", "member":{"shape":"CustomImage"}, - "max":200 + "max":200, + "min":0 }, "CustomPosixUserConfig":{ "type":"structure", @@ -12345,7 +12662,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,128}" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,128}" }, "CustomerMetadataKeyList":{ "type":"list", @@ -12362,7 +12679,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,256}" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,256}" }, "CustomizedMetricSpecification":{ "type":"structure", @@ -12392,7 +12709,8 @@ "members":{ "EnableCapture":{ "shape":"EnableCapture", - "documentation":"

        Whether data capture should be enabled or disabled (defaults to enabled).

        " + "documentation":"

        Whether data capture should be enabled or disabled (defaults to enabled).

        ", + "box":true }, "InitialSamplingPercentage":{ "shape":"SamplingPercentage", @@ -12429,7 +12747,8 @@ "members":{ "EnableCapture":{ "shape":"EnableCapture", - "documentation":"

        Whether data capture is enabled or disabled.

        " + "documentation":"

        Whether data capture is enabled or disabled.

        ", + "box":true }, "CaptureStatus":{ "shape":"CaptureStatus", @@ -12661,7 +12980,8 @@ }, "VolumeSizeInGB":{ "shape":"OptionalVolumeSizeInGB", - "documentation":"

        The size, in GB, of the ML storage volume attached to the processing instance.

        " + "documentation":"

        The size, in GB, of the ML storage volume attached to the processing instance.

        ", + "box":true }, "RuleParameters":{ "shape":"RuleParameters", @@ -13582,18 +13902,21 @@ "members":{ "Success":{ "shape":"Success", - "documentation":"

        Returns true if the work team was successfully deleted; otherwise, returns false.

        " + "documentation":"

        Returns true if the work team was successfully deleted; otherwise, returns false.

        ", + "box":true } } }, "DependencyCopyPath":{ "type":"string", "max":1023, + "min":0, "pattern":".*" }, "DependencyOriginPath":{ "type":"string", "max":1023, + "min":0, "pattern":".*" }, "DeployedImage":{ @@ -13879,7 +14202,8 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

        Whether the algorithm is certified to be listed in Amazon Web Services Marketplace.

        " + "documentation":"

        Whether the algorithm is certified to be listed in Amazon Web Services Marketplace.

        ", + "box":true } } }, @@ -13989,7 +14313,8 @@ }, "RecoveryMode":{ "shape":"Boolean", - "documentation":"

        Indicates whether the application is launched in recovery mode.

        " + "documentation":"

        Indicates whether the application is launched in recovery mode.

        ", + "box":true }, "LastHealthCheckTimestamp":{ "shape":"Timestamp", @@ -14157,7 +14482,8 @@ }, "GenerateCandidateDefinitionsOnly":{ "shape":"GenerateCandidateDefinitionsOnly", - "documentation":"

        Indicates whether the output for an AutoML job generates candidate definitions only.

        " + "documentation":"

        Indicates whether the output for an AutoML job generates candidate definitions only.

        ", + "box":true }, "AutoMLJobArtifacts":{ "shape":"AutoMLJobArtifacts", @@ -14382,7 +14708,8 @@ }, "ClusterSchedulerConfigVersion":{ "shape":"Integer", - "documentation":"

        Version of the cluster policy.

        " + "documentation":"

        Version of the cluster policy.

        ", + "box":true } } }, @@ -14411,7 +14738,8 @@ }, "ClusterSchedulerConfigVersion":{ "shape":"Integer", - "documentation":"

        Version of the cluster policy.

        " + "documentation":"

        Version of the cluster policy.

        ", + "box":true }, "Status":{ "shape":"SchedulerResourceStatus", @@ -14596,7 +14924,8 @@ }, "ComputeQuotaVersion":{ "shape":"Integer", - "documentation":"

        Version of the compute allocation definition.

        " + "documentation":"

        Version of the compute allocation definition.

        ", + "box":true } } }, @@ -14630,7 +14959,8 @@ }, "ComputeQuotaVersion":{ "shape":"Integer", - "documentation":"

        Version of the compute allocation definition.

        " + "documentation":"

        Version of the compute allocation definition.

        ", + "box":true }, "Status":{ "shape":"SchedulerResourceStatus", @@ -14898,7 +15228,8 @@ }, "MaxModels":{ "shape":"Integer", - "documentation":"

        The maximum number of models.

        " + "documentation":"

        The maximum number of models.

        ", + "box":true }, "NextToken":{ "shape":"NextToken", @@ -15033,7 +15364,8 @@ }, "MaxResults":{ "shape":"DeploymentStageMaxResults", - "documentation":"

        The maximum number of results to select (50 by default).

        " + "documentation":"

        The maximum number of results to select (50 by default).

        ", + "box":true } } }, @@ -15065,15 +15397,18 @@ }, "EdgeDeploymentSuccess":{ "shape":"Integer", - "documentation":"

        The number of edge devices with the successful deployment.

        " + "documentation":"

        The number of edge devices with the successful deployment.

        ", + "box":true }, "EdgeDeploymentPending":{ "shape":"Integer", - "documentation":"

        The number of edge devices yet to pick up deployment, or in progress.

        " + "documentation":"

        The number of edge devices yet to pick up deployment, or in progress.

        ", + "box":true }, "EdgeDeploymentFailed":{ "shape":"Integer", - "documentation":"

        The number of edge devices that failed the deployment.

        " + "documentation":"

        The number of edge devices that failed the deployment.

        ", + "box":true }, "Stages":{ "shape":"DeploymentStageStatusSummaries", @@ -15232,7 +15567,8 @@ "VpcConfig":{"shape":"VpcConfig"}, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Indicates whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers.

        " + "documentation":"

        Indicates whether all model containers deployed to the endpoint are isolated. If they are, no inbound or outbound network calls can be made to or from the model containers.

        ", + "box":true } } }, @@ -16030,7 +16366,8 @@ }, "Horovod":{ "shape":"Horovod", - "documentation":"

        Indicates Horovod compatibility.

        " + "documentation":"

        Indicates Horovod compatibility.

        ", + "box":true }, "ReleaseNotes":{ "shape":"ReleaseNotes", @@ -16459,6 +16796,10 @@ "shape":"TrackingServerStatus", "documentation":"

        The current creation status of the described MLflow Tracking Server.

        " }, + "TrackingServerMaintenanceStatus":{ + "shape":"TrackingServerMaintenanceStatus", + "documentation":"

        The current maintenance status of the described MLflow Tracking Server.

        " + }, "IsActive":{ "shape":"IsTrackingServerActive", "documentation":"

        Whether the described MLflow Tracking Server is currently active.

        " @@ -16473,7 +16814,8 @@ }, "AutomaticModelRegistration":{ "shape":"Boolean", - "documentation":"

        Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled.

        " + "documentation":"

        Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled.

        ", + "box":true }, "CreationTime":{ "shape":"Timestamp", @@ -16588,7 +16930,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card that the model export job exports.

        " + "documentation":"

        The version of the model card that the model export job exports.

        ", + "box":true }, "OutputConfig":{ "shape":"ModelCardExportOutputConfig", @@ -16622,7 +16965,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card to describe. If a version is not provided, then the latest version of the model card is described.

        " + "documentation":"

        The version of the model card to describe. If a version is not provided, then the latest version of the model card is described.

        ", + "box":true } } }, @@ -16648,7 +16992,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card.

        " + "documentation":"

        The version of the model card.

        ", + "box":true }, "Content":{ "shape":"ModelCardContent", @@ -16790,7 +17135,8 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        If True, no inbound or outbound network calls can be made to or from the model container.

        " + "documentation":"

        If True, no inbound or outbound network calls can be made to or from the model container.

        ", + "box":true }, "DeploymentRecommendation":{ "shape":"DeploymentRecommendation", @@ -16907,7 +17253,8 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

        Whether the model package is certified for listing on Amazon Web Services Marketplace.

        " + "documentation":"

        Whether the model package is certified for listing on Amazon Web Services Marketplace.

        ", + "box":true }, "ModelApprovalStatus":{ "shape":"ModelApprovalStatus", @@ -17403,7 +17750,8 @@ }, "EnableIamSessionBasedIdentity":{ "shape":"Boolean", - "documentation":"

        When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

        " + "documentation":"

        When set to TRUE, the SageMaker Partner AI App sets the Amazon Web Services IAM session name or the authenticated IAM user as the identity of the SageMaker Partner AI App user.

        ", + "box":true }, "Error":{ "shape":"ErrorInfo", @@ -17708,6 +18056,10 @@ "shape":"ProjectStatus", "documentation":"

        The status of the project.

        " }, + "TemplateProviderDetails":{ + "shape":"TemplateProviderDetailList", + "documentation":"

        An array of template providers associated with the project.

        " + }, "CreatedBy":{"shape":"UserContext"}, "CreationTime":{ "shape":"Timestamp", @@ -17975,15 +18327,18 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        " + "documentation":"

        If you want to allow inbound or outbound network calls, except for calls between peers within a training cluster for distributed training, choose True. If you enable network isolation for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        ", + "box":true }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithms in distributed training.

        " + "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

        ", + "box":true }, "EnableManagedSpotTraining":{ "shape":"Boolean", - "documentation":"

        A Boolean indicating whether managed spot training is enabled (True) or not (False).

        " + "documentation":"

        A Boolean indicating whether managed spot training is enabled (True) or not (False).

        ", + "box":true }, "CheckpointConfig":{"shape":"CheckpointConfig"}, "TrainingTimeInSeconds":{ @@ -18462,7 +18817,8 @@ }, "Description":{ "type":"string", - "max":128 + "max":128, + "min":0 }, "DesiredWeightAndCapacity":{ "type":"structure", @@ -18495,7 +18851,8 @@ "DestinationS3Uri":{ "type":"string", "max":512, - "pattern":"^(https|s3)://([^/])/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/])/?(.*)" }, "DetailedAlgorithmStatus":{ "type":"string", @@ -18538,7 +18895,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[a-z\\-]*:[a-z\\-]*:[a-z\\-]*:\\d{12}:[a-z\\-]*/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:[a-z\\-]*:[a-z\\-]*:\\d{12}:[a-z\\-]*/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "DeviceDeploymentStatus":{ "type":"string", @@ -18616,17 +18973,17 @@ "type":"string", "max":40, "min":1, - "pattern":"^[-a-zA-Z0-9_.,;:! ]*$" + "pattern":"[-a-zA-Z0-9_.,;:! ]*" }, "DeviceFleetArn":{ "type":"string", - "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:device-fleet/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:iam::\\d{12}:device-fleet/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "DeviceFleetDescription":{ "type":"string", "max":800, "min":1, - "pattern":"^[-a-zA-Z0-9_.,;:! ]*$" + "pattern":"[-a-zA-Z0-9_.,;:! ]*" }, "DeviceFleetSummaries":{ "type":"list", @@ -18662,7 +19019,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "DeviceNames":{ "type":"list", @@ -18678,7 +19035,8 @@ }, "Percentage":{ "shape":"Percentage", - "documentation":"

        Percentage of devices in the fleet to deploy to the current stage.

        " + "documentation":"

        Percentage of devices in the fleet to deploy to the current stage.

        ", + "box":true }, "DeviceNames":{ "shape":"DeviceNames", @@ -18700,11 +19058,13 @@ "members":{ "ConnectedDeviceCount":{ "shape":"Long", - "documentation":"

        The number of devices connected with a heartbeat.

        " + "documentation":"

        The number of devices connected with a heartbeat.

        ", + "box":true }, "RegisteredDeviceCount":{ "shape":"Long", - "documentation":"

        The number of registered devices.

        " + "documentation":"

        The number of registered devices.

        ", + "box":true } }, "documentation":"

        Status of devices.

        " @@ -18773,6 +19133,7 @@ }, "Dimension":{ "type":"integer", + "box":true, "max":8192, "min":1 }, @@ -18804,6 +19165,7 @@ "DirectoryPath":{ "type":"string", "max":4096, + "min":0, "pattern":".*" }, "DisableProfiler":{"type":"boolean"}, @@ -18867,7 +19229,7 @@ "type":"string", "max":14, "min":5, - "pattern":"^\\d{1,4}.\\d{1,4}.\\d{1,4}$" + "pattern":"\\d{1,4}.\\d{1,4}.\\d{1,4}" }, "Dollars":{ "type":"integer", @@ -18877,6 +19239,7 @@ "DomainArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:domain/.*" }, "DomainDetails":{ @@ -18916,7 +19279,8 @@ "DomainId":{ "type":"string", "max":63, - "pattern":"^d-(-*[a-z0-9]){1,61}" + "min":0, + "pattern":"d-(-*[a-z0-9]){1,61}" }, "DomainList":{ "type":"list", @@ -18925,12 +19289,14 @@ "DomainName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "DomainSecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupId"}, - "max":3 + "max":3, + "min":0 }, "DomainSettings":{ "type":"structure", @@ -19005,7 +19371,10 @@ ] }, "Double":{"type":"double"}, - "DoubleParameterValue":{"type":"double"}, + "DoubleParameterValue":{ + "type":"double", + "box":true + }, "DriftCheckBaselines":{ "type":"structure", "members":{ @@ -19093,19 +19462,23 @@ "members":{ "MinCapacity":{ "shape":"Integer", - "documentation":"

        The recommended minimum capacity to specify for your autoscaling policy.

        " + "documentation":"

        The recommended minimum capacity to specify for your autoscaling policy.

        ", + "box":true }, "MaxCapacity":{ "shape":"Integer", - "documentation":"

        The recommended maximum capacity to specify for your autoscaling policy.

        " + "documentation":"

        The recommended maximum capacity to specify for your autoscaling policy.

        ", + "box":true }, "ScaleInCooldown":{ "shape":"Integer", - "documentation":"

        The recommended scale in cooldown time for your autoscaling policy.

        " + "documentation":"

        The recommended scale in cooldown time for your autoscaling policy.

        ", + "box":true }, "ScaleOutCooldown":{ "shape":"Integer", - "documentation":"

        The recommended scale out cooldown time for your autoscaling policy.

        " + "documentation":"

        The recommended scale out cooldown time for your autoscaling policy.

        ", + "box":true }, "ScalingPolicies":{ "shape":"ScalingPolicies", @@ -19173,6 +19546,33 @@ }, "documentation":"

        A collection of EBS storage settings that apply to both private and shared spaces.

        " }, + "Ec2CapacityReservation":{ + "type":"structure", + "members":{ + "Ec2CapacityReservationId":{ + "shape":"Ec2CapacityReservationId", + "documentation":"

        The unique identifier for an EC2 capacity reservation that's part of the ML capacity reservation.

        " + }, + "TotalInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that you allocated to the EC2 capacity reservation.

        " + }, + "AvailableInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that are currently available in the EC2 capacity reservation.

        " + }, + "UsedByCurrentEndpoint":{ + "shape":"TaskCount", + "documentation":"

        The number of instances from the EC2 capacity reservation that are being used by the endpoint.

        " + } + }, + "documentation":"

        The EC2 capacity reservations that are shared to an ML capacity reservation.

        " + }, + "Ec2CapacityReservationId":{"type":"string"}, + "Ec2CapacityReservationsList":{ + "type":"list", + "member":{"shape":"Ec2CapacityReservation"} + }, "Edge":{ "type":"structure", "members":{ @@ -19228,7 +19628,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z\\-]*:\\d{12}:edge-deployment/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z\\-]*:\\d{12}:edge-deployment/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "EdgeDeploymentPlanSummaries":{ "type":"list", @@ -19259,15 +19659,18 @@ }, "EdgeDeploymentSuccess":{ "shape":"Integer", - "documentation":"

        The number of edge devices with the successful deployment.

        " + "documentation":"

        The number of edge devices with the successful deployment.

        ", + "box":true }, "EdgeDeploymentPending":{ "shape":"Integer", - "documentation":"

        The number of edge devices yet to pick up the deployment, or in progress.

        " + "documentation":"

        The number of edge devices yet to pick up the deployment, or in progress.

        ", + "box":true }, "EdgeDeploymentFailed":{ "shape":"Integer", - "documentation":"

        The number of edge devices that failed the deployment.

        " + "documentation":"

        The number of edge devices that failed the deployment.

        ", + "box":true }, "CreationTime":{ "shape":"Timestamp", @@ -19295,15 +19698,18 @@ }, "EdgeDeploymentSuccessInStage":{ "shape":"Integer", - "documentation":"

        The number of edge devices with the successful deployment in the current stage.

        " + "documentation":"

        The number of edge devices with the successful deployment in the current stage.

        ", + "box":true }, "EdgeDeploymentPendingInStage":{ "shape":"Integer", - "documentation":"

        The number of edge devices yet to pick up the deployment in current stage, or in progress.

        " + "documentation":"

        The number of edge devices yet to pick up the deployment in current stage, or in progress.

        ", + "box":true }, "EdgeDeploymentFailedInStage":{ "shape":"Integer", - "documentation":"

        The number of edge devices that failed the deployment in current stage.

        " + "documentation":"

        The number of edge devices that failed the deployment in current stage.

        ", + "box":true }, "EdgeDeploymentStatusMessage":{ "shape":"String", @@ -19363,19 +19769,23 @@ }, "OfflineDeviceCount":{ "shape":"Long", - "documentation":"

        The number of devices that have this model version and do not have a heart beat.

        " + "documentation":"

        The number of devices that have this model version and do not have a heart beat.

        ", + "box":true }, "ConnectedDeviceCount":{ "shape":"Long", - "documentation":"

        The number of devices that have this model version and have a heart beat.

        " + "documentation":"

        The number of devices that have this model version and have a heart beat.

        ", + "box":true }, "ActiveDeviceCount":{ "shape":"Long", - "documentation":"

        The number of devices that have this model version, a heart beat, and are currently running.

        " + "documentation":"

        The number of devices that have this model version, a heart beat, and are currently running.

        ", + "box":true }, "SamplingDeviceCount":{ "shape":"Long", - "documentation":"

        The number of devices with this model version and are producing sample data.

        " + "documentation":"

        The number of devices that have this model version and are producing sample data.

        ", + "box":true } }, "documentation":"

        Status of edge devices with this model.

        " @@ -19437,7 +19847,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z\\-]*:\\d{12}:edge-packaging-job/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z\\-]*:\\d{12}:edge-packaging-job/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "EdgePackagingJobStatus":{ "type":"string", @@ -19549,13 +19959,14 @@ "EfsUid":{ "type":"string", "max":10, + "min":0, "pattern":"\\d+" }, "EksClusterArn":{ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[a-z\\-]*:eks:[a-z0-9\\-]*:[0-9]{12}:cluster\\/[0-9A-Za-z][A-Za-z0-9\\-_]{0,99}$" + "pattern":"arn:aws[a-z\\-]*:eks:[a-z0-9\\-]*:[0-9]{12}:cluster\\/[0-9A-Za-z][A-Za-z0-9\\-_]{0,99}" }, "EmrServerlessComputeConfig":{ "type":"structure", @@ -19566,7 +19977,7 @@ "documentation":"

        The ARN of the IAM role granting the AutoML job V2 the necessary permissions access policies to list, connect to, or manage EMR Serverless jobs. For detailed information about the required permissions of this role, see \"How to configure AutoML to initiate a remote job on EMR Serverless for large datasets\" in Create a regression or classification job for tabular data using the AutoML API or Create an AutoML job for time-series forecasting using the API.

        " } }, - "documentation":"

        This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

        Specifies the compute configuration for the EMR Serverless job.

        " + "documentation":"

        This data type is intended for use exclusively by SageMaker Canvas and cannot be used in other contexts at the moment.

        Specifies the compute configuration for the EMR Serverless job.

        " }, "EmrServerlessSettings":{ "type":"structure", @@ -19597,9 +20008,18 @@ "documentation":"

        The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.

        " }, "EnableCapture":{"type":"boolean"}, - "EnableInfraCheck":{"type":"boolean"}, - "EnableIotRoleAlias":{"type":"boolean"}, - "EnableRemoteDebug":{"type":"boolean"}, + "EnableInfraCheck":{ + "type":"boolean", + "box":true + }, + "EnableIotRoleAlias":{ + "type":"boolean", + "box":true + }, + "EnableRemoteDebug":{ + "type":"boolean", + "box":true + }, "EnableSagemakerServicecatalogPortfolioInput":{ "type":"structure", "members":{} @@ -19608,7 +20028,10 @@ "type":"structure", "members":{} }, - "EnableSessionTagChaining":{"type":"boolean"}, + "EnableSessionTagChaining":{ + "type":"boolean", + "box":true + }, "EnabledOrDisabled":{ "type":"string", "enum":[ @@ -19690,11 +20113,13 @@ "EndpointConfigName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "EndpointConfigNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "EndpointConfigSortKey":{ @@ -19856,11 +20281,13 @@ "EndpointName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "EndpointNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "EndpointOutputConfiguration":{ @@ -19908,7 +20335,8 @@ "EndpointPerformances":{ "type":"list", "member":{"shape":"EndpointPerformance"}, - "max":1 + "max":1, + "min":0 }, "EndpointSortKey":{ "type":"string", @@ -19982,29 +20410,33 @@ "Endpoints":{ "type":"list", "member":{"shape":"EndpointInfo"}, - "max":1 + "max":1, + "min":0 }, "EntityDescription":{ "type":"string", "max":1024, + "min":0, "pattern":"[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*" }, "EntityName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "EnvironmentKey":{ "type":"string", "max":1024, + "min":0, "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" }, "EnvironmentMap":{ "type":"map", 
"key":{"shape":"EnvironmentKey"}, "value":{"shape":"EnvironmentValue"}, - "max":100 + "max":100, + "min":0 }, "EnvironmentParameter":{ "type":"structure", @@ -20048,6 +20480,7 @@ "EnvironmentValue":{ "type":"string", "max":1024, + "min":0, "pattern":"[\\S\\s]*" }, "ErrorInfo":{ @@ -20066,12 +20499,14 @@ }, "ExcludeFeaturesAttribute":{ "type":"string", - "max":100 + "max":100, + "min":0 }, "ExecutionRoleArns":{ "type":"list", "member":{"shape":"RoleArn"}, - "max":5 + "max":5, + "min":0 }, "ExecutionRoleIdentityConfig":{ "type":"string", @@ -20095,6 +20530,7 @@ "ExitMessage":{ "type":"string", "max":1024, + "min":0, "pattern":"[\\S\\s]*" }, "Experiment":{ @@ -20140,6 +20576,7 @@ "ExperimentArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:experiment/.*" }, "ExperimentConfig":{ @@ -20167,13 +20604,14 @@ "ExperimentDescription":{ "type":"string", "max":3072, + "min":0, "pattern":".*" }, "ExperimentEntityName":{ "type":"string", "max":120, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}" }, "ExperimentEntityNameOrArn":{ "type":"string", @@ -20199,6 +20637,7 @@ "ExperimentSourceArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:.*" }, "ExperimentSummaries":{ @@ -20234,6 +20673,7 @@ }, "ExpiresInSeconds":{ "type":"integer", + "box":true, "max":300, "min":5 }, @@ -20306,7 +20746,8 @@ }, "FailureReason":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "FairShare":{ "type":"string", @@ -20317,6 +20758,7 @@ }, "FairShareWeight":{ "type":"integer", + "box":true, "max":100, "min":0 }, @@ -20428,10 +20870,12 @@ "FeatureGroupArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:feature-group/.*" }, "FeatureGroupMaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -20439,7 +20883,7 @@ "type":"string", "max":64, "min":1, - 
"pattern":"^[a-zA-Z0-9]([_-]*[a-zA-Z0-9]){0,63}" + "pattern":"[a-zA-Z0-9]([_-]*[a-zA-Z0-9]){0,63}" }, "FeatureGroupNameContains":{ "type":"string", @@ -20555,7 +20999,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}" + "pattern":"[a-zA-Z0-9]([-_]*[a-zA-Z0-9]){0,63}" }, "FeatureParameter":{ "type":"structure", @@ -20574,24 +21018,26 @@ "FeatureParameterAdditions":{ "type":"list", "member":{"shape":"FeatureParameter"}, - "max":25 + "max":25, + "min":0 }, "FeatureParameterKey":{ "type":"string", "max":255, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)" }, "FeatureParameterRemovals":{ "type":"list", "member":{"shape":"FeatureParameterKey"}, - "max":25 + "max":25, + "min":0 }, "FeatureParameterValue":{ "type":"string", "max":255, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)" }, "FeatureParameters":{ "type":"list", @@ -20692,13 +21138,13 @@ "type":"string", "max":21, "min":11, - "pattern":"^(fs-[0-9a-f]{8,})$" + "pattern":"(fs-[0-9a-f]{8,})" }, "FileSystemPath":{ "type":"string", "max":256, "min":1, - "pattern":"^\\/\\S*$" + "pattern":"\\/\\S*" }, "FileSystemType":{ "type":"string", @@ -20718,7 +21164,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9\\_\\-]+$" + "pattern":"[a-zA-Z0-9\\_\\-]+" }, "FillingTransformations":{ "type":"map", @@ -20788,7 +21234,8 @@ }, "Value":{ "shape":"MetricValue", - "documentation":"

        The value of the metric with the best result.

        " + "documentation":"

        The value of the metric with the best result.

        ", + "box":true }, "StandardMetricName":{ "shape":"AutoMLMetricEnum", @@ -20814,7 +21261,8 @@ }, "Value":{ "shape":"MetricValue", - "documentation":"

        The value of the objective metric.

        " + "documentation":"

        The value of the objective metric.

        ", + "box":true } }, "documentation":"

        Shows the latest objective metric emitted by a training job that was launched by a hyperparameter tuning job. You define the objective metric in the HyperParameterTuningJobObjective parameter of HyperParameterTuningJobConfig.

        " @@ -20836,13 +21284,14 @@ "FlowDefinitionArn":{ "type":"string", "max":1024, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]+:[0-9]{12}:flow-definition/.*" }, "FlowDefinitionName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-z0-9](-*[a-z0-9]){0,62}" + "pattern":"[a-z0-9](-*[a-z0-9]){0,62}" }, "FlowDefinitionOutputConfig":{ "type":"structure", @@ -20906,10 +21355,12 @@ }, "FlowDefinitionTaskAvailabilityLifetimeInSeconds":{ "type":"integer", + "box":true, "min":1 }, "FlowDefinitionTaskCount":{ "type":"integer", + "box":true, "max":3, "min":1 }, @@ -20923,7 +21374,7 @@ "type":"string", "max":30, "min":1, - "pattern":"^[A-Za-z0-9]+( [A-Za-z0-9]+)*$" + "pattern":"[A-Za-z0-9]+( [A-Za-z0-9]+)*" }, "FlowDefinitionTaskKeywords":{ "type":"list", @@ -20933,22 +21384,24 @@ }, "FlowDefinitionTaskTimeLimitInSeconds":{ "type":"integer", + "box":true, "min":30 }, "FlowDefinitionTaskTitle":{ "type":"string", "max":128, "min":1, - "pattern":"^[\\t\\n\\r -\\uD7FF\\uE000-\\uFFFD]*$" + "pattern":"[\\t\\n\\r -\\uD7FF\\uE000-\\uFFFD]*" }, "ForecastFrequency":{ "type":"string", "max":5, "min":1, - "pattern":"^1Y|Y|([1-9]|1[0-1])M|M|[1-4]W|W|[1-6]D|D|([1-9]|1[0-9]|2[0-3])H|H|([1-9]|[1-5][0-9])min$" + "pattern":"1Y|Y|([1-9]|1[0-1])M|M|[1-4]W|W|[1-6]D|D|([1-9]|1[0-9]|2[0-3])H|H|([1-9]|[1-5][0-9])min" }, "ForecastHorizon":{ "type":"integer", + "box":true, "min":1 }, "ForecastQuantile":{ @@ -21185,6 +21638,7 @@ }, "Gid":{ "type":"long", + "box":true, "max":4000000, "min":1001 }, @@ -21221,7 +21675,7 @@ "type":"string", "max":1024, "min":11, - "pattern":"^https://([^/]+)/?.{3,1016}$" + "pattern":"https://([^/]+)/?.{3,1016}" }, "Group":{ "type":"string", @@ -21275,7 +21729,8 @@ "HiddenSageMakerImageVersionAliasesList":{ "type":"list", "member":{"shape":"HiddenSageMakerImage"}, - "max":5 + "max":5, + "min":0 }, "HolidayConfig":{ "type":"list", @@ -21315,11 +21770,13 @@ "HubArn":{ "type":"string", "max":255, + "min":0, "pattern":".*" }, "HubContentArn":{ 
"type":"string", "max":255, + "min":0, "pattern":".*" }, "HubContentDependency":{ @@ -21339,21 +21796,25 @@ "HubContentDependencyList":{ "type":"list", "member":{"shape":"HubContentDependency"}, - "max":50 + "max":50, + "min":0 }, "HubContentDescription":{ "type":"string", "max":1023, + "min":0, "pattern":".*" }, "HubContentDisplayName":{ "type":"string", "max":255, + "min":0, "pattern":".*" }, "HubContentDocument":{ "type":"string", "max":65535, + "min":0, "pattern":".*" }, "HubContentInfo":{ @@ -21429,17 +21890,20 @@ }, "HubContentMarkdown":{ "type":"string", - "max":65535 + "max":65535, + "min":0 }, "HubContentName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "HubContentSearchKeywordList":{ "type":"list", "member":{"shape":"HubSearchKeyword"}, - "max":50 + "max":50, + "min":0 }, "HubContentSortBy":{ "type":"string", @@ -21479,16 +21943,18 @@ "type":"string", "max":14, "min":5, - "pattern":"^\\d{1,4}.\\d{1,4}.\\d{1,4}$" + "pattern":"\\d{1,4}.\\d{1,4}.\\d{1,4}" }, "HubDescription":{ "type":"string", "max":1023, + "min":0, "pattern":".*" }, "HubDisplayName":{ "type":"string", "max":255, + "min":0, "pattern":".*" }, "HubInfo":{ @@ -21543,11 +22009,12 @@ "HubName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "HubNameOrArn":{ "type":"string", - "pattern":"^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "HubS3StorageConfig":{ "type":"structure", @@ -21562,12 +22029,14 @@ "HubSearchKeyword":{ "type":"string", "max":255, - "pattern":"^[^A-Z]*$" + "min":0, + "pattern":"[^A-Z]*" }, "HubSearchKeywordList":{ "type":"list", "member":{"shape":"HubSearchKeyword"}, - "max":50 + "max":50, + "min":0 }, "HubSortBy":{ 
"type":"string", @@ -21592,7 +22061,8 @@ }, "HumanLoopActivationConditions":{ "type":"string", - "max":10240 + "max":10240, + "min":0 }, "HumanLoopActivationConditionsConfig":{ "type":"structure", @@ -21739,13 +22209,14 @@ "HumanTaskUiArn":{ "type":"string", "max":1024, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]+:[0-9]{12}:human-task-ui/.*" }, "HumanTaskUiName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-z0-9](-*[a-z0-9])*" + "pattern":"[a-z0-9](-*[a-z0-9])*" }, "HumanTaskUiStatus":{ "type":"string", @@ -21804,6 +22275,7 @@ "HyperParameterKey":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "HyperParameterScalingType":{ @@ -21840,11 +22312,13 @@ }, "IsTunable":{ "shape":"Boolean", - "documentation":"

        Indicates whether this hyperparameter is tunable in a hyperparameter tuning job.

        " + "documentation":"

        Indicates whether this hyperparameter is tunable in a hyperparameter tuning job.

        ", + "box":true }, "IsRequired":{ "shape":"Boolean", - "documentation":"

        Indicates whether this hyperparameter is required.

        " + "documentation":"

        Indicates whether this hyperparameter is required.

        ", + "box":true }, "DefaultValue":{ "shape":"HyperParameterValue", @@ -21912,15 +22386,18 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        " + "documentation":"

        Isolates the training container. No inbound or outbound network calls can be made, except for calls between peers within a training cluster for distributed training. If network isolation is used for training jobs that are configured to use a VPC, SageMaker downloads and uploads customer data and model artifacts through the specified VPC, but the training container does not have network access.

        ", + "box":true }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

        " + "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

        ", + "box":true }, "EnableManagedSpotTraining":{ "shape":"Boolean", - "documentation":"

        A Boolean indicating whether managed spot training is enabled (True) or not (False).

        " + "documentation":"

        A Boolean indicating whether managed spot training is enabled (True) or not (False).

        ", + "box":true }, "CheckpointConfig":{"shape":"CheckpointConfig"}, "RetryStrategy":{ @@ -21938,7 +22415,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, "HyperParameterTrainingJobDefinitions":{ "type":"list", @@ -21949,17 +22426,20 @@ "HyperParameterTrainingJobEnvironmentKey":{ "type":"string", "max":512, + "min":0, "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" }, "HyperParameterTrainingJobEnvironmentMap":{ "type":"map", "key":{"shape":"HyperParameterTrainingJobEnvironmentKey"}, "value":{"shape":"HyperParameterTrainingJobEnvironmentValue"}, - "max":48 + "max":48, + "min":0 }, "HyperParameterTrainingJobEnvironmentValue":{ "type":"string", "max":512, + "min":0, "pattern":"[\\S\\s]*" }, "HyperParameterTrainingJobSummaries":{ @@ -22045,11 +22525,13 @@ }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

        The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See Step 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more information.

        " + "documentation":"

        The number of instances of the type specified by InstanceType. Choose an instance count larger than 1 for distributed training algorithms. See Step 2: Launch a SageMaker Distributed Training Job Using the SageMaker Python SDK for more information.

        ", + "box":true }, "VolumeSizeInGB":{ "shape":"VolumeSizeInGB", - "documentation":"

        The volume size in GB of the data to be processed for hyperparameter optimization (optional).

        " + "documentation":"

        The volume size in GB of the data to be processed for hyperparameter optimization (optional).

        ", + "box":true } }, "documentation":"

        The configuration for hyperparameter tuning resources for use in training jobs launched by the tuning job. These resources include compute instances and storage volumes. Specify one or more compute instance configurations and allocation strategies to select resources (optional).

        " @@ -22063,6 +22545,7 @@ "HyperParameterTuningJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:hyper-parameter-tuning-job/.*" }, "HyperParameterTuningJobCompletionDetails":{ @@ -22070,7 +22553,8 @@ "members":{ "NumberOfTrainingJobsObjectiveNotImproving":{ "shape":"Integer", - "documentation":"

        The number of training jobs launched by a tuning job that are not improving (1% or less) as measured by model performance evaluated against an objective function.

        " + "documentation":"

        The number of training jobs launched by a tuning job that are not improving (1% or less) as measured by model performance evaluated against an objective function.

        ", + "box":true }, "ConvergenceDetectedTime":{ "shape":"Timestamp", @@ -22126,7 +22610,8 @@ "members":{ "RuntimeInSeconds":{ "shape":"Integer", - "documentation":"

        The wall clock runtime in seconds used by your hyperparameter tuning job.

        " + "documentation":"

        The wall clock runtime in seconds used by your hyperparameter tuning job.

        ", + "box":true } }, "documentation":"

        The total resources consumed by your hyperparameter tuning job.

        " @@ -22135,7 +22620,7 @@ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" }, "HyperParameterTuningJobObjective":{ "type":"structure", @@ -22349,6 +22834,7 @@ }, "HyperParameterTuningMaxRuntimeInSeconds":{ "type":"integer", + "box":true, "max":15768000, "min":120 }, @@ -22361,11 +22847,13 @@ }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

        The number of compute instances of type InstanceType to use. For distributed training, select a value greater than 1.

        " + "documentation":"

        The number of compute instances of type InstanceType to use. For distributed training, select a value greater than 1.

        ", + "box":true }, "VolumeSizeInGB":{ "shape":"OptionalVolumeSizeInGB", - "documentation":"

        The volume size in GB for the storage volume to be used in processing hyperparameter optimization jobs (optional). These volumes store model artifacts, incremental states and optionally, scratch space for training algorithms. Do not provide a value for this parameter if a value for InstanceConfigs is also specified.

        Some instance types have a fixed total local storage size. If you select one of these instances for training, VolumeSizeInGB cannot be greater than this total size. For a list of instance types with local instance storage and their sizes, see instance store volumes.

        SageMaker supports only the General Purpose SSD (gp2) storage volume type.

        " + "documentation":"

        The volume size in GB for the storage volume to be used in processing hyperparameter optimization jobs (optional). These volumes store model artifacts, incremental states and optionally, scratch space for training algorithms. Do not provide a value for this parameter if a value for InstanceConfigs is also specified.

        Some instance types have a fixed total local storage size. If you select one of these instances for training, VolumeSizeInGB cannot be greater than this total size. For a list of instance types with local instance storage and their sizes, see instance store volumes.

        SageMaker supports only the General Purpose SSD (gp2) storage volume type.

        ", + "box":true }, "VolumeKmsKeyId":{ "shape":"KmsKeyId", @@ -22385,6 +22873,7 @@ "HyperParameterValue":{ "type":"string", "max":2500, + "min":0, "pattern":".*" }, "HyperParameters":{ @@ -22410,10 +22899,12 @@ }, "HyperbandStrategyMaxResource":{ "type":"integer", + "box":true, "min":1 }, "HyperbandStrategyMinResource":{ "type":"integer", + "box":true, "min":1 }, "IamIdentity":{ @@ -22474,7 +22965,8 @@ "IdentityProviderOAuthSettings":{ "type":"list", "member":{"shape":"IdentityProviderOAuthSetting"}, - "max":20 + "max":20, + "min":0 }, "IdleSettings":{ "type":"structure", @@ -22500,6 +22992,7 @@ }, "IdleTimeoutInMinutes":{ "type":"integer", + "box":true, "max":525600, "min":60 }, @@ -22551,7 +23044,8 @@ "ImageArn":{ "type":"string", "max":256, - "pattern":"^arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-zA-Z0-9]([-.]?[a-zA-Z0-9])*$" + "min":0, + "pattern":"arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image/[a-zA-Z0-9]([-.]?[a-zA-Z0-9])*" }, "ImageBaseImage":{ "type":"string", @@ -22598,7 +23092,8 @@ "ImageDeletePropertyList":{ "type":"list", "member":{"shape":"ImageDeleteProperty"}, - "max":2 + "max":2, + "min":0 }, "ImageDescription":{ "type":"string", @@ -22609,24 +23104,26 @@ "ImageDigest":{ "type":"string", "max":72, - "pattern":"^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" + "min":0, + "pattern":"[Ss][Hh][Aa]256:[0-9a-fA-F]{64}" }, "ImageDisplayName":{ "type":"string", "max":128, "min":1, - "pattern":"^\\S(.*\\S)?$" + "pattern":"\\S(.*\\S)?" 
}, "ImageName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9]([-.]?[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9]([-.]?[a-zA-Z0-9]){0,62}" }, "ImageNameContains":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9\\-.]+$" + "min":0, + "pattern":"[a-zA-Z0-9\\-.]+" }, "ImageSortBy":{ "type":"string", @@ -22658,6 +23155,7 @@ "ImageUri":{ "type":"string", "max":255, + "min":0, "pattern":".*" }, "ImageVersion":{ @@ -22712,15 +23210,17 @@ "type":"string", "max":128, "min":1, - "pattern":"^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$" + "pattern":"(0|[1-9]\\d*)\\.(0|[1-9]\\d*)" }, "ImageVersionArn":{ "type":"string", "max":256, - "pattern":"^(arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image-version/[a-z0-9]([-.]?[a-z0-9])*/[0-9]+|None)$" + "min":0, + "pattern":"(arn:aws(-[\\w]+)*:sagemaker:.+:[0-9]{12}:image-version/[a-z0-9]([-.]?[a-z0-9])*/[0-9]+|None)" }, "ImageVersionNumber":{ "type":"integer", + "box":true, "min":0 }, "ImageVersionSortBy":{ @@ -22835,6 +23335,7 @@ }, "InUseInstanceCount":{ "type":"integer", + "box":true, "min":0 }, "InferenceComponentArn":{ @@ -22925,6 +23426,7 @@ }, "InferenceComponentCopyCount":{ "type":"integer", + "box":true, "min":0 }, "InferenceComponentDeploymentConfig":{ @@ -22942,11 +23444,13 @@ "InferenceComponentName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?$" + "min":0, + "pattern":"[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?" 
}, "InferenceComponentNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "InferenceComponentRollingUpdatePolicy":{ @@ -23156,6 +23660,7 @@ "InferenceExperimentArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:inference-experiment/.*" }, "InferenceExperimentDataStorageConfig":{ @@ -23177,6 +23682,7 @@ "InferenceExperimentDescription":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "InferenceExperimentList":{ @@ -23187,7 +23693,7 @@ "type":"string", "max":120, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}" }, "InferenceExperimentSchedule":{ "type":"structure", @@ -23219,6 +23725,7 @@ "InferenceExperimentStatusReason":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "InferenceExperimentStopDesiredState":{ @@ -23298,7 +23805,8 @@ }, "InferenceImage":{ "type":"string", - "max":256 + "max":256, + "min":0 }, "InferenceMetrics":{ "type":"structure", @@ -23309,11 +23817,13 @@ "members":{ "MaxInvocations":{ "shape":"Integer", - "documentation":"

        The expected maximum number of requests per minute for the instance.

        " + "documentation":"

        The expected maximum number of requests per minute for the instance.

        ", + "box":true }, "ModelLatency":{ "shape":"Integer", - "documentation":"

        The expected model latency at maximum invocations per minute for the instance.

        " + "documentation":"

        The expected model latency at maximum invocations per minute for the instance.

        ", + "box":true } }, "documentation":"

        The metrics for an existing endpoint compared in an Inference Recommender job.

        " @@ -23492,7 +24002,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "InfraCheckConfig":{ "type":"structure", @@ -23506,14 +24016,17 @@ }, "InitialInstanceCount":{ "type":"integer", + "box":true, "min":1 }, "InitialNumberOfUsers":{ "type":"integer", + "box":true, "min":1 }, "InitialTaskCount":{ "type":"integer", + "box":true, "min":1 }, "InputConfig":{ @@ -23562,6 +24075,7 @@ }, "InstanceCount":{ "type":"integer", + "box":true, "min":1 }, "InstanceGroup":{ @@ -23578,7 +24092,8 @@ }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

        Specifies the number of instances of the instance group.

        " + "documentation":"

        Specifies the number of instances of the instance group.

        ", + "box":true }, "InstanceGroupName":{ "shape":"InstanceGroupName", @@ -23596,7 +24111,8 @@ "InstanceGroupNames":{ "type":"list", "member":{"shape":"InstanceGroupName"}, - "max":5 + "max":5, + "min":0 }, "InstanceGroupStatus":{ "type":"string", @@ -23618,7 +24134,8 @@ "InstanceGroups":{ "type":"list", "member":{"shape":"InstanceGroup"}, - "max":5 + "max":5, + "min":0 }, "InstanceMetadataServiceConfiguration":{ "type":"structure", @@ -23864,17 +24381,19 @@ "InvocationStartTime":{"type":"timestamp"}, "InvocationsMaxRetries":{ "type":"integer", + "box":true, "max":3, "min":0 }, "InvocationsTimeoutInSeconds":{ "type":"integer", + "box":true, "max":3600, "min":1 }, "IotRoleAlias":{ "type":"string", - "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:rolealias/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:iam::\\d{12}:rolealias/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "IsTrackingServerActive":{ "type":"string", @@ -23890,6 +24409,7 @@ }, "JobDurationInSeconds":{ "type":"integer", + "box":true, "min":1 }, "JobReferenceCode":{ @@ -23922,7 +24442,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9.])*" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*\\/[a-zA-Z0-9](-*[a-zA-Z0-9.])*" }, "JsonContentTypes":{ "type":"list", @@ -23994,7 +24514,8 @@ }, "KeepAlivePeriodInSeconds":{ "type":"integer", - "documentation":"Optional. Customer requested period in seconds for which the Training cluster is kept alive after the job is finished.", + "documentation":"

        Optional. Customer requested period in seconds for which the Training cluster is kept alive after the job is finished.

        ", + "box":true, "max":3600, "min":0 }, @@ -24010,7 +24531,8 @@ }, "KernelDisplayName":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "KernelGatewayAppSettings":{ "type":"structure", @@ -24047,7 +24569,8 @@ }, "KernelName":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "KernelSpec":{ "type":"structure", @@ -24079,13 +24602,14 @@ "KmsKeyId":{ "type":"string", "max":2048, - "pattern":"^[a-zA-Z0-9:/_-]*$" + "min":0, + "pattern":"[a-zA-Z0-9:/_-]*" }, "LabelAttributeName":{ "type":"string", "max":127, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,126}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,126}" }, "LabelCounter":{ "type":"integer", @@ -24096,23 +24620,28 @@ "members":{ "TotalLabeled":{ "shape":"LabelCounter", - "documentation":"

        The total number of objects labeled.

        " + "documentation":"

        The total number of objects labeled.

        ", + "box":true }, "HumanLabeled":{ "shape":"LabelCounter", - "documentation":"

        The total number of objects labeled by a human worker.

        " + "documentation":"

        The total number of objects labeled by a human worker.

        ", + "box":true }, "MachineLabeled":{ "shape":"LabelCounter", - "documentation":"

        The total number of objects labeled by automated data labeling.

        " + "documentation":"

        The total number of objects labeled by automated data labeling.

        ", + "box":true }, "FailedNonRetryableError":{ "shape":"LabelCounter", - "documentation":"

        The total number of objects that could not be labeled due to an error.

        " + "documentation":"

        The total number of objects that could not be labeled due to an error.

        ", + "box":true }, "Unlabeled":{ "shape":"LabelCounter", - "documentation":"

        The total number of objects not yet labeled.

        " + "documentation":"

        The total number of objects not yet labeled.

        ", + "box":true } }, "documentation":"

        Provides a breakdown of the number of objects labeled.

        " @@ -24122,15 +24651,18 @@ "members":{ "HumanLabeled":{ "shape":"LabelCounter", - "documentation":"

        The total number of data objects labeled by a human worker.

        " + "documentation":"

        The total number of data objects labeled by a human worker.

        ", + "box":true }, "PendingHuman":{ "shape":"LabelCounter", - "documentation":"

        The total number of data objects that need to be labeled by a human worker.

        " + "documentation":"

        The total number of data objects that need to be labeled by a human worker.

        ", + "box":true }, "Total":{ "shape":"LabelCounter", - "documentation":"

        The total number of tasks in the labeling job.

        " + "documentation":"

        The total number of tasks in the labeling job.

        ", + "box":true } }, "documentation":"

        Provides counts for human-labeled tasks in the labeling job.

        " @@ -24138,6 +24670,7 @@ "LabelingJobAlgorithmSpecificationArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:.*" }, "LabelingJobAlgorithmsConfig":{ @@ -24162,6 +24695,7 @@ "LabelingJobArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:labeling-job/.*" }, "LabelingJobDataAttributes":{ @@ -24246,7 +24780,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "LabelingJobOutput":{ "type":"structure", @@ -24410,6 +24944,7 @@ "LambdaFunctionArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:aws[a-z\\-]*:lambda:[a-z0-9\\-]*:[0-9]{12}:function:.*" }, "LambdaStepMetadata":{ @@ -24428,7 +24963,8 @@ }, "LandingUri":{ "type":"string", - "max":1023 + "max":1023, + "min":0 }, "LastModifiedTime":{"type":"timestamp"}, "LastUpdateStatus":{ @@ -24469,11 +25005,13 @@ "type":"map", "key":{"shape":"StringParameterValue"}, "value":{"shape":"StringParameterValue"}, - "max":30 + "max":30, + "min":0 }, "LineageGroupArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:lineage-group/.*" }, "LineageGroupNameOrArn":{ @@ -25097,7 +25635,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        Set the maximum number of SageMaker HyperPod clusters to list.

        " + "documentation":"

        Specifies the maximum number of clusters to evaluate for the operation (not necessarily the number of matching items). After SageMaker processes the number of clusters up to MaxResults, it stops the operation and returns the matching clusters up to that point. If all the matching clusters are desired, SageMaker will go through all the clusters until NextToken is empty.

        " }, "NameContains":{ "shape":"NameContains", @@ -25199,8 +25737,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of model compilation jobs to return in the response.

        ", - "box":true + "documentation":"

        The maximum number of model compilation jobs to return in the response.

        " }, "CreationTimeAfter":{ "shape":"CreationTime", @@ -25910,8 +26447,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        ", - "box":true + "documentation":"

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        " } } }, @@ -26136,8 +26672,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        ", - "box":true + "documentation":"

        The total number of items to return. If the total number of available items is more than the value specified in MaxResults, then a NextToken will be provided in the output that you can use to resume pagination.

        " } } }, @@ -26164,8 +26699,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of tuning jobs to return. The default value is 10.

        ", - "box":true + "documentation":"

        The maximum number of tuning jobs to return. The default value is 10.

        " }, "SortBy":{ "shape":"HyperParameterTuningJobSortByOptions", @@ -26842,7 +27376,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        List export jobs for the model card with the specified version.

        " + "documentation":"

        List export jobs for the model card with the specified version.

        ", + "box":true }, "CreationTimeAfter":{ "shape":"Timestamp", @@ -27653,8 +28188,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of optimization jobs to return in the response. The default is 50.

        ", - "box":true + "documentation":"

        The maximum number of optimization jobs to return in the response. The default is 50.

        " }, "CreationTimeAfter":{ "shape":"CreationTime", @@ -27941,8 +28475,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of processing jobs to return in the response.

        ", - "box":true + "documentation":"

        The maximum number of processing jobs to return in the response.

        " } } }, @@ -28117,7 +28650,8 @@ }, "ExcludeDevicesDeployedInOtherStage":{ "shape":"Boolean", - "documentation":"

        Toggle for excluding devices deployed in other stages.

        " + "documentation":"

        Toggle for excluding devices deployed in other stages.

        ", + "box":true }, "StageName":{ "shape":"EntityName", @@ -28210,8 +28744,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of work teams to return in each page of the response.

        ", - "box":true + "documentation":"

        The maximum number of work teams to return in each page of the response.

        " } } }, @@ -28249,6 +28782,7 @@ }, "ListTagsMaxResults":{ "type":"integer", + "box":true, "min":50 }, "ListTagsOutput":{ @@ -28317,8 +28851,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of training jobs to return in the response.

        ", - "box":true + "documentation":"

        The maximum number of training jobs to return in the response.

        " }, "CreationTimeAfter":{ "shape":"Timestamp", @@ -28385,8 +28918,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of results to return in the response.

        ", - "box":true + "documentation":"

        The maximum number of results to return in the response.

        " }, "StartTimeAfter":{ "shape":"Timestamp", @@ -28465,8 +28997,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of transform jobs to return in the response. The default value is 10.

        ", - "box":true + "documentation":"

        The maximum number of transform jobs to return in the response. The default value is 10.

        " } } }, @@ -28655,8 +29186,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of workforces returned in the response.

        ", - "box":true + "documentation":"

        The maximum number of workforces returned in the response.

        " } } }, @@ -28702,8 +29232,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of work teams to return in each page of the response.

        ", - "box":true + "documentation":"

        The maximum number of work teams to return in each page of the response.

        " } } }, @@ -28733,14 +29262,16 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z]+ ?\\d+\\.\\d+(\\.\\d+)?$" + "pattern":"[a-zA-Z]+ ?\\d+\\.\\d+(\\.\\d+)?" }, "ManagedInstanceScalingMaxInstanceCount":{ "type":"integer", + "box":true, "min":1 }, "ManagedInstanceScalingMinInstanceCount":{ "type":"integer", + "box":true, "min":0 }, "ManagedInstanceScalingStatus":{ @@ -28752,41 +29283,50 @@ }, "MaxAutoMLJobRuntimeInSeconds":{ "type":"integer", + "box":true, "min":1 }, "MaxCandidates":{ "type":"integer", + "box":true, "max":750, "min":1 }, "MaxConcurrentInvocationsPerInstance":{ "type":"integer", + "box":true, "max":1000, "min":1 }, "MaxConcurrentTaskCount":{ "type":"integer", + "box":true, "max":5000, "min":1 }, "MaxConcurrentTransforms":{ "type":"integer", + "box":true, "min":0 }, "MaxHumanLabeledObjectCount":{ "type":"integer", + "box":true, "min":1 }, "MaxNumberOfTests":{ "type":"integer", + "box":true, "min":1 }, "MaxNumberOfTrainingJobs":{ "type":"integer", + "box":true, "min":1 }, "MaxNumberOfTrainingJobsNotImproving":{ "type":"integer", + "box":true, "min":3 }, "MaxParallelExecutionSteps":{ @@ -28795,6 +29335,7 @@ }, "MaxParallelOfTests":{ "type":"integer", + "box":true, "min":1 }, "MaxParallelTrainingJobs":{ @@ -28803,21 +29344,25 @@ }, "MaxPayloadInMB":{ "type":"integer", + "box":true, "min":0 }, "MaxPendingTimeInSeconds":{ "type":"integer", - "documentation":"Maximum job scheduler pending time in seconds.", + "documentation":"

        Maximum job scheduler pending time in seconds.

        ", + "box":true, "max":2419200, "min":7200 }, "MaxPercentageOfInputDatasetLabeled":{ "type":"integer", + "box":true, "max":100, "min":1 }, "MaxResults":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -28827,14 +29372,17 @@ }, "MaxRuntimePerTrainingJobInSeconds":{ "type":"integer", + "box":true, "min":1 }, "MaxWaitTimeInSeconds":{ "type":"integer", + "box":true, "min":1 }, "MaximumExecutionTimeoutInSeconds":{ "type":"integer", + "box":true, "max":28800, "min":600 }, @@ -28846,7 +29394,8 @@ "MediaType":{ "type":"string", "max":64, - "pattern":"^[-\\w]+\\/[-\\w+]+$" + "min":0, + "pattern":"[-\\w]+\\/[-\\w+]+" }, "MemberDefinition":{ "type":"structure", @@ -28870,6 +29419,7 @@ }, "MemoryInMb":{ "type":"integer", + "box":true, "min":128 }, "MetadataProperties":{ @@ -28897,6 +29447,7 @@ "MetadataPropertyValue":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "MetricData":{ @@ -28908,7 +29459,8 @@ }, "Value":{ "shape":"Float", - "documentation":"

        The value of the metric.

        " + "documentation":"

        The value of the metric.

        ", + "box":true }, "Timestamp":{ "shape":"Timestamp", @@ -28930,17 +29482,18 @@ "shape":"AutoMLMetricEnum", "documentation":"

        The name of the metric.

        " }, + "StandardMetricName":{ + "shape":"AutoMLMetricExtendedEnum", + "documentation":"

        The name of the standard metric.

        For definitions of the standard metrics, see Autopilot candidate metrics .

        " + }, "Value":{ "shape":"Float", - "documentation":"

        The value of the metric.

        " + "documentation":"

        The value of the metric.

        ", + "box":true }, "Set":{ "shape":"MetricSetSource", "documentation":"

        The dataset split from which the AutoML job produced the metric.

        " - }, - "StandardMetricName":{ - "shape":"AutoMLMetricExtendedEnum", - "documentation":"

        The name of the standard metric.

        For definitions of the standard metrics, see Autopilot candidate metrics .

        " } }, "documentation":"

        Information about the metric for a candidate produced by an AutoML job.

        " @@ -29030,8 +29583,15 @@ "MinimumInstanceMetadataServiceVersion":{ "type":"string", "max":1, + "min":0, "pattern":"1|2" }, + "MlReservationArn":{ + "type":"string", + "max":258, + "min":20, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:ml-reservation/.*" + }, "MlTools":{ "type":"string", "enum":[ @@ -29060,7 +29620,8 @@ "MlflowVersion":{ "type":"string", "max":16, - "pattern":"^[0-9]*.[0-9]*.[0-9]*" + "min":0, + "pattern":"[0-9]*.[0-9]*.[0-9]*" }, "Model":{ "type":"structure", @@ -29090,7 +29651,8 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

        " + "documentation":"

        Isolates the model container. No inbound or outbound network calls can be made to or from the model container.

        ", + "box":true }, "Tags":{ "shape":"TagList", @@ -29109,7 +29671,8 @@ "members":{ "AcceptEula":{ "shape":"AcceptEula", - "documentation":"

        Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

        " + "documentation":"

        Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

        ", + "box":true } }, "documentation":"

        The access configuration file to control access to the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig.

        " @@ -29208,7 +29771,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card.

        " + "documentation":"

        The version of the model card.

        ", + "box":true }, "Content":{ "shape":"ModelCardContent", @@ -29254,7 +29818,8 @@ "ModelCardArn":{ "type":"string", "max":256, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-card/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-card/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ModelCardContent":{ "type":"string", @@ -29277,11 +29842,12 @@ "ModelCardExportJobArn":{ "type":"string", "max":256, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-card/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}/export-job/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-card/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}/export-job/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ModelCardExportJobSortBy":{ "type":"string", - "documentation":"Attribute by which to sort returned export jobs.", + "documentation":"

        Attribute by which to sort returned export jobs.

        ", "enum":[ "Name", "CreationTime", @@ -29333,7 +29899,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The version of the model card that the export job exports.

        " + "documentation":"

        The version of the model card that the export job exports.

        ", + "box":true }, "CreatedAt":{ "shape":"Timestamp", @@ -29475,7 +30042,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        A version of the model card.

        " + "documentation":"

        A version of the model card.

        ", + "box":true }, "CreationTime":{ "shape":"Timestamp", @@ -29587,7 +30155,8 @@ "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

        Indicates whether the alert action is turned on.

        " + "documentation":"

        Indicates whether the alert action is turned on.

        ", + "box":true } }, "documentation":"

        An alert action taken to light up an icon on the Amazon SageMaker Model Dashboard when an alert goes into InAlert status.

        " @@ -29628,7 +30197,8 @@ }, "ModelCardVersion":{ "shape":"Integer", - "documentation":"

        The model card version.

        " + "documentation":"

        The model card version.

        ", + "box":true }, "ModelCardStatus":{ "shape":"ModelCardStatus", @@ -29741,7 +30311,8 @@ "members":{ "AutoGenerateEndpointName":{ "shape":"AutoGenerateEndpointName", - "documentation":"

        Set to True to automatically generate an endpoint name for a one-click Autopilot model deployment; set to False otherwise. The default value is False.

        If you set AutoGenerateEndpointName to True, do not specify the EndpointName; otherwise a 400 error is thrown.

        " + "documentation":"

        Set to True to automatically generate an endpoint name for a one-click Autopilot model deployment; set to False otherwise. The default value is False.

        If you set AutoGenerateEndpointName to True, do not specify the EndpointName; otherwise a 400 error is thrown.

        ", + "box":true }, "EndpointName":{ "shape":"EndpointName", @@ -29860,7 +30431,8 @@ }, "ValueInMilliseconds":{ "shape":"Integer", - "documentation":"

        The model latency percentile value in milliseconds.

        " + "documentation":"

        The model latency percentile value in milliseconds.

        ", + "box":true } }, "documentation":"

        The model latency threshold.

        " @@ -29998,11 +30570,13 @@ "ModelName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?" + "min":0, + "pattern":"[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?" }, "ModelNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "ModelPackage":{ @@ -30054,7 +30628,8 @@ }, "CertifyForMarketplace":{ "shape":"CertifyForMarketplace", - "documentation":"

        Whether the model package is to be certified to be listed on Amazon Web Services Marketplace. For information about listing model packages on Amazon Web Services Marketplace, see List Your Algorithm or Model Package on Amazon Web Services Marketplace.

        " + "documentation":"

        Whether the model package is to be certified to be listed on Amazon Web Services Marketplace. For information about listing model packages on Amazon Web Services Marketplace, see List Your Algorithm or Model Package on Amazon Web Services Marketplace.

        ", + "box":true }, "ModelApprovalStatus":{ "shape":"ModelApprovalStatus", @@ -30133,7 +30708,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package/[\\S]{1,2048}$" + "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package/[\\S]{1,2048}" }, "ModelPackageArnList":{ "type":"list", @@ -30247,7 +30822,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"^arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package-group/[\\S]{1,2048}$" + "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package-group/[\\S]{1,2048}" }, "ModelPackageGroupSortBy":{ "type":"string", @@ -30497,6 +31072,7 @@ }, "ModelPackageVersion":{ "type":"integer", + "box":true, "min":1 }, "ModelQuality":{ @@ -30605,6 +31181,7 @@ }, "ModelSetupTime":{ "type":"integer", + "box":true, "min":0 }, "ModelShardingConfig":{ @@ -30744,7 +31321,8 @@ "ModelVariantName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?" + "min":0, + "pattern":"[a-zA-Z0-9]([\\-a-zA-Z0-9]*[a-zA-Z0-9])?" }, "ModelVariantStatus":{ "type":"string", @@ -30809,7 +31387,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "MonitoringAlertStatus":{ "type":"string", @@ -30960,13 +31538,15 @@ "members":{ "Header":{ "shape":"Boolean", - "documentation":"

        Indicates if the CSV data has a header.

        " + "documentation":"

        Indicates if the CSV data has a header.

        ", + "box":true } }, "documentation":"

        Represents the CSV dataset format used when running a monitoring job.

        " }, "MonitoringDatapointsToAlert":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -30992,10 +31572,12 @@ "type":"map", "key":{"shape":"ProcessingEnvironmentKey"}, "value":{"shape":"ProcessingEnvironmentValue"}, - "max":50 + "max":50, + "min":0 }, "MonitoringEvaluationPeriod":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -31146,13 +31728,14 @@ "MonitoringJobDefinitionArn":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "MonitoringJobDefinitionName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "MonitoringJobDefinitionSortKey":{ "type":"string", @@ -31198,7 +31781,8 @@ "members":{ "Line":{ "shape":"Boolean", - "documentation":"

        Indicates if the file should be read as a JSON object per line.

        " + "documentation":"

        Indicates if the file should be read as a JSON object per line.

        ", + "box":true } }, "documentation":"

        Represents the JSON dataset format used when running a monitoring job.

        " @@ -31213,11 +31797,13 @@ "members":{ "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        Whether to encrypt all communications between the instances used for the monitoring jobs. Choose True to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer.

        " + "documentation":"

        Whether to encrypt all communications between the instances used for the monitoring jobs. Choose True to encrypt communications. Encryption provides greater security for distributed jobs, but the processing might take longer.

        ", + "box":true }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job.

        " + "documentation":"

        Whether to allow inbound and outbound network calls to and from the containers used for the monitoring job.

        ", + "box":true }, "VpcConfig":{"shape":"VpcConfig"} }, @@ -31304,7 +31890,8 @@ "MonitoringS3Uri":{ "type":"string", "max":512, - "pattern":"^(https|s3)://([^/]+)/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/]+)/?(.*)" }, "MonitoringSchedule":{ "type":"structure", @@ -31353,6 +31940,7 @@ "MonitoringScheduleArn":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "MonitoringScheduleConfig":{ @@ -31385,7 +31973,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "MonitoringScheduleSortKey":{ "type":"string", @@ -31460,7 +32048,8 @@ "members":{ "MaxRuntimeInSeconds":{ "shape":"MonitoringMaxRuntimeInSeconds", - "documentation":"

        The maximum runtime allowed in seconds.

        The MaxRuntimeInSeconds cannot exceed the frequency of the job. For data quality and model explainability, this can be up to 3600 seconds for an hourly schedule. For model bias and model quality hourly schedules, this can be up to 1800 seconds.

        " + "documentation":"

        The maximum runtime allowed in seconds.

        The MaxRuntimeInSeconds cannot exceed the frequency of the job. For data quality and model explainability, this can be up to 3600 seconds for an hourly schedule. For model bias and model quality hourly schedules, this can be up to 1800 seconds.

        ", + "box":true } }, "documentation":"

        A time limit for how long the monitoring job is allowed to run before stopping.

        " @@ -31469,7 +32058,7 @@ "type":"string", "max":15, "min":1, - "pattern":"^.?P.*" + "pattern":".?P.*" }, "MonitoringType":{ "type":"string", @@ -31483,7 +32072,8 @@ "MountPath":{ "type":"string", "max":1024, - "pattern":"^\\/.*" + "min":0, + "pattern":"\\/.*" }, "MultiModelConfig":{ "type":"structure", @@ -31498,6 +32088,7 @@ "NameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9\\-]+" }, "NeoVpcConfig":{ @@ -31521,6 +32112,7 @@ "NeoVpcSecurityGroupId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "NeoVpcSecurityGroupIds":{ @@ -31532,6 +32124,7 @@ "NeoVpcSubnetId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "NeoVpcSubnets":{ @@ -31569,11 +32162,13 @@ "members":{ "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

        " + "documentation":"

        Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.

        ", + "box":true }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        Whether to allow inbound and outbound network calls to and from the containers used for the processing job.

        " + "documentation":"

        Whether to allow inbound and outbound network calls to and from the containers used for the processing job.

        ", + "box":true }, "VpcConfig":{"shape":"VpcConfig"} }, @@ -31583,6 +32178,7 @@ "NextToken":{ "type":"string", "max":8192, + "min":0, "pattern":".*" }, "NodeUnavailabilityType":{ @@ -31594,17 +32190,20 @@ }, "NodeUnavailabilityValue":{ "type":"integer", + "box":true, "min":1 }, "NonEmptyString256":{ "type":"string", "max":256, - "pattern":"^(?!\\s*$).+" + "min":0, + "pattern":"(?!\\s*$).+" }, "NonEmptyString64":{ "type":"string", "max":64, - "pattern":"^(?!\\s*$).+" + "min":0, + "pattern":"(?!\\s*$).+" }, "NotebookInstanceAcceleratorType":{ "type":"string", @@ -31623,11 +32222,13 @@ }, "NotebookInstanceArn":{ "type":"string", - "max":256 + "max":256, + "min":0 }, "NotebookInstanceLifecycleConfigArn":{ "type":"string", - "max":256 + "max":256, + "min":0 }, "NotebookInstanceLifecycleConfigContent":{ "type":"string", @@ -31638,16 +32239,19 @@ "NotebookInstanceLifecycleConfigList":{ "type":"list", "member":{"shape":"NotebookInstanceLifecycleHook"}, - "max":1 + "max":1, + "min":0 }, "NotebookInstanceLifecycleConfigName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "NotebookInstanceLifecycleConfigNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "NotebookInstanceLifecycleConfigSortKey":{ @@ -31708,11 +32312,13 @@ "NotebookInstanceName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "NotebookInstanceNameContains":{ "type":"string", "max":63, + "min":0, "pattern":"[a-zA-Z0-9-]+" }, "NotebookInstanceSortKey":{ @@ -31799,6 +32405,7 @@ "NotebookInstanceUrl":{"type":"string"}, "NotebookInstanceVolumeSizeInGB":{ "type":"integer", + "box":true, "max":16384, "min":5 }, @@ -31825,19 +32432,23 @@ }, "NumberOfAcceleratorDevices":{ "type":"float", + "box":true, "min":1 }, "NumberOfCpuCores":{ "type":"float", + "box":true, "min":0.25 }, "NumberOfHumanWorkersPerDataObject":{ 
"type":"integer", + "box":true, "max":9, "min":1 }, "NumberOfSteps":{ "type":"integer", + "box":true, "min":1 }, "ObjectiveStatus":{ @@ -31857,15 +32468,18 @@ "members":{ "Succeeded":{ "shape":"ObjectiveStatusCounter", - "documentation":"

        The number of training jobs whose final objective metric was evaluated by the hyperparameter tuning job and used in the hyperparameter tuning process.

        " + "documentation":"

        The number of training jobs whose final objective metric was evaluated by the hyperparameter tuning job and used in the hyperparameter tuning process.

        ", + "box":true }, "Pending":{ "shape":"ObjectiveStatusCounter", - "documentation":"

        The number of training jobs that are in progress and pending evaluation of their final objective metric.

        " + "documentation":"

        The number of training jobs that are in progress and pending evaluation of their final objective metric.

        ", + "box":true }, "Failed":{ "shape":"ObjectiveStatusCounter", - "documentation":"

        The number of training jobs whose final objective metric was not evaluated and used in the hyperparameter tuning process. This typically occurs when the training job failed or did not emit an objective metric.

        " + "documentation":"

        The number of training jobs whose final objective metric was not evaluated and used in the hyperparameter tuning process. This typically occurs when the training job failed or did not emit an objective metric.

        ", + "box":true } }, "documentation":"

        Specifies the number of training jobs that this hyperparameter tuning job launched, categorized by the status of their objective metric. The objective metric status shows whether the final objective metric for the training job has been evaluated by the tuning job and used in the hyperparameter tuning process.

        " @@ -31880,7 +32494,8 @@ }, "DisableGlueTableCreation":{ "shape":"Boolean", - "documentation":"

        Set to True to disable the automatic creation of an Amazon Web Services Glue table when configuring an OfflineStore. If set to False, Feature Store will name the OfflineStore Glue table following Athena's naming recommendations.

        The default value is False.

        " + "documentation":"

        Set to True to disable the automatic creation of an Amazon Web Services Glue table when configuring an OfflineStore. If set to False, Feature Store will name the OfflineStore Glue table following Athena's naming recommendations.

        The default value is False.

        ", + "box":true }, "DataCatalogConfig":{ "shape":"DataCatalogConfig", @@ -32017,6 +32632,7 @@ "OidcEndpoint":{ "type":"string", "max":500, + "min":0, "pattern":"https://\\S+" }, "OidcMemberDefinition":{ @@ -32044,7 +32660,8 @@ }, "EnableOnlineStore":{ "shape":"Boolean", - "documentation":"

        Turn OnlineStore off by specifying False for the EnableOnlineStore flag. Turn OnlineStore on by specifying True for the EnableOnlineStore flag.

        The default value is False.

        " + "documentation":"

        Turn OnlineStore off by specifying False for the EnableOnlineStore flag. Turn OnlineStore on by specifying True for the EnableOnlineStore flag.

        The default value is False.

        ", + "box":true }, "TtlDuration":{ "shape":"TtlDuration", @@ -32077,7 +32694,10 @@ }, "documentation":"

        The security configuration for OnlineStore.

        " }, - "OnlineStoreTotalSizeBytes":{"type":"long"}, + "OnlineStoreTotalSizeBytes":{ + "type":"long", + "box":true + }, "Operator":{ "type":"string", "enum":[ @@ -32115,16 +32735,19 @@ "OptimizationConfigs":{ "type":"list", "member":{"shape":"OptimizationConfig"}, - "max":10 + "max":10, + "min":0 }, "OptimizationContainerImage":{ "type":"string", "max":255, + "min":0, "pattern":"[\\S]+" }, "OptimizationJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/.*" }, "OptimizationJobDeploymentInstanceType":{ @@ -32170,7 +32793,8 @@ "type":"map", "key":{"shape":"NonEmptyString256"}, "value":{"shape":"String256"}, - "max":25 + "max":25, + "min":0 }, "OptimizationJobModelSource":{ "type":"structure", @@ -32283,7 +32907,8 @@ "members":{ "AcceptEula":{ "shape":"OptimizationModelAcceptEula", - "documentation":"

        Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

        " + "documentation":"

        Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

        ", + "box":true } }, "documentation":"

        The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).

        " @@ -32324,6 +32949,7 @@ "OptimizationVpcSecurityGroupId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "OptimizationVpcSecurityGroupIds":{ @@ -32335,6 +32961,7 @@ "OptimizationVpcSubnetId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "OptimizationVpcSubnets":{ @@ -32343,8 +32970,14 @@ "max":16, "min":1 }, - "OptionalDouble":{"type":"double"}, - "OptionalInteger":{"type":"integer"}, + "OptionalDouble":{ + "type":"double", + "box":true + }, + "OptionalInteger":{ + "type":"integer", + "box":true + }, "OptionalVolumeSizeInGB":{ "type":"integer", "min":0 @@ -32457,6 +33090,7 @@ "PaginationToken":{ "type":"string", "max":8192, + "min":0, "pattern":".*" }, "ParallelismConfiguration":{ @@ -32465,7 +33099,8 @@ "members":{ "MaxParallelExecutionSteps":{ "shape":"MaxParallelExecutionSteps", - "documentation":"

        The max number of steps that can be executed in parallel.

        " + "documentation":"

        The max number of steps that can be executed in parallel.

        ", + "box":true } }, "documentation":"

        Configuration that controls the parallelism of the pipeline. By default, the parallelism configuration specified applies to all executions of the pipeline unless overridden.

        " @@ -32491,6 +33126,7 @@ "ParameterKey":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ParameterList":{ @@ -32502,6 +33138,7 @@ "ParameterName":{ "type":"string", "max":256, + "min":0, "pattern":"[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*" }, "ParameterRange":{ @@ -32556,6 +33193,7 @@ "ParameterValue":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ParameterValues":{ @@ -32615,7 +33253,7 @@ "type":"string", "max":128, "min":1, - "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:partner-app\\/app-[A-Z0-9]{12}$" + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:partner-app\\/app-[A-Z0-9]{12}" }, "PartnerAppAuthType":{ "type":"string", @@ -32649,7 +33287,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9]+" + "pattern":"[a-zA-Z0-9]+" }, "PartnerAppStatus":{ "type":"string", @@ -32871,6 +33509,7 @@ "PipelineArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:pipeline/.*" }, "PipelineDefinition":{ @@ -32963,7 +33602,8 @@ "PipelineExecutionArn":{ "type":"string", "max":2048, - "pattern":"^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:pipeline\\/.*\\/execution\\/.*$" + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:pipeline\\/.*\\/execution\\/.*" }, "PipelineExecutionDescription":{ "type":"string", @@ -32974,13 +33614,14 @@ "PipelineExecutionFailureReason":{ "type":"string", "max":1300, + "min":0, "pattern":".*" }, "PipelineExecutionName":{ "type":"string", "max":82, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,81}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,81}" }, "PipelineExecutionStatus":{ "type":"string", @@ -33033,7 +33674,8 @@ }, "AttemptCount":{ "shape":"Integer", - "documentation":"

        The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.

        " + "documentation":"

        The current attempt of the execution step. For more information, see Retry Policy for SageMaker Pipelines steps.

        ", + "box":true }, "SelectiveExecutionResult":{ "shape":"SelectiveExecutionResult", @@ -33172,7 +33814,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" }, "PipelineNameOrArn":{ "type":"string", @@ -33184,7 +33826,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[A-Za-z0-9\\-_]*$" + "pattern":"[A-Za-z0-9\\-_]*" }, "PipelineStatus":{ "type":"string", @@ -33240,7 +33882,8 @@ "PlatformIdentifier":{ "type":"string", "max":15, - "pattern":"^(notebook-al1-v1|notebook-al2-v1|notebook-al2-v2|notebook-al2-v3)$" + "min":0, + "pattern":"(notebook-al1-v1|notebook-al2-v1|notebook-al2-v2|notebook-al2-v3)" }, "PolicyString":{ "type":"string", @@ -33292,10 +33935,14 @@ }, "PriorityWeight":{ "type":"integer", + "box":true, "max":100, "min":0 }, - "ProbabilityThresholdAttribute":{"type":"double"}, + "ProbabilityThresholdAttribute":{ + "type":"double", + "box":true + }, "ProblemType":{ "type":"string", "enum":[ @@ -33334,17 +33981,20 @@ "ProcessingEnvironmentKey":{ "type":"string", "max":256, + "min":0, "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" }, "ProcessingEnvironmentMap":{ "type":"map", "key":{"shape":"ProcessingEnvironmentKey"}, "value":{"shape":"ProcessingEnvironmentValue"}, - "max":100 + "max":100, + "min":0 }, "ProcessingEnvironmentValue":{ "type":"string", "max":256, + "min":0, "pattern":"[\\S\\s]*" }, "ProcessingFeatureStoreOutput":{ @@ -33368,7 +34018,8 @@ }, "AppManaged":{ "shape":"AppManaged", - "documentation":"

        When True, input operations such as data download are managed natively by the processing job application. When False (default), input operations are managed by Amazon SageMaker.

        " + "documentation":"

        When True, input operations such as data download are managed natively by the processing job application. When False (default), input operations are managed by Amazon SageMaker.

        ", + "box":true }, "S3Input":{ "shape":"ProcessingS3Input", @@ -33389,6 +34040,7 @@ }, "ProcessingInstanceCount":{ "type":"integer", + "box":true, "max":100, "min":1 }, @@ -33487,7 +34139,34 @@ "ml.c6i.12xlarge", "ml.c6i.16xlarge", "ml.c6i.24xlarge", - "ml.c6i.32xlarge" + "ml.c6i.32xlarge", + "ml.m7i.large", + "ml.m7i.xlarge", + "ml.m7i.2xlarge", + "ml.m7i.4xlarge", + "ml.m7i.8xlarge", + "ml.m7i.12xlarge", + "ml.m7i.16xlarge", + "ml.m7i.24xlarge", + "ml.m7i.48xlarge", + "ml.c7i.large", + "ml.c7i.xlarge", + "ml.c7i.2xlarge", + "ml.c7i.4xlarge", + "ml.c7i.8xlarge", + "ml.c7i.12xlarge", + "ml.c7i.16xlarge", + "ml.c7i.24xlarge", + "ml.c7i.48xlarge", + "ml.r7i.large", + "ml.r7i.xlarge", + "ml.r7i.2xlarge", + "ml.r7i.4xlarge", + "ml.r7i.8xlarge", + "ml.r7i.12xlarge", + "ml.r7i.16xlarge", + "ml.r7i.24xlarge", + "ml.r7i.48xlarge" ] }, "ProcessingJob":{ @@ -33569,13 +34248,14 @@ "ProcessingJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:processing-job/.*" }, "ProcessingJobName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "ProcessingJobStatus":{ "type":"string", @@ -33648,6 +34328,7 @@ "ProcessingLocalPath":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "ProcessingMaxRuntimeInSeconds":{ @@ -33673,7 +34354,8 @@ }, "AppManaged":{ "shape":"AppManaged", - "documentation":"

        When True, output operations such as data upload are managed natively by the processing job application. When False (default), output operations are managed by Amazon SageMaker.

        " + "documentation":"

        When True, output operations such as data upload are managed natively by the processing job application. When False (default), output operations are managed by Amazon SageMaker.

        ", + "box":true } }, "documentation":"

        Describes the results of a processing job. The processing output must specify exactly one of either S3Output or FeatureStoreOutput types.

        " @@ -33807,13 +34489,15 @@ "members":{ "MaxRuntimeInSeconds":{ "shape":"ProcessingMaxRuntimeInSeconds", - "documentation":"

        Specifies the maximum runtime in seconds.

        " + "documentation":"

        Specifies the maximum runtime in seconds.

        ", + "box":true } }, "documentation":"

        Configures conditions under which the processing job should be stopped, such as how long the processing job has been running. After the condition is met, the processing job is stopped.

        " }, "ProcessingVolumeSizeInGB":{ "type":"integer", + "box":true, "max":16384, "min":1 }, @@ -33827,7 +34511,8 @@ "ProductId":{ "type":"string", "max":256, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "ProductListings":{ "type":"list", @@ -33896,6 +34581,10 @@ "InferenceAmiVersion":{ "shape":"ProductionVariantInferenceAmiVersion", "documentation":"

        Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.

        By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.

        The AMI version names, and their configurations, are the following:

        al2-ami-sagemaker-inference-gpu-2
        • Accelerator: GPU

        • NVIDIA driver version: 535

        • CUDA version: 12.2

        al2-ami-sagemaker-inference-gpu-2-1
        • Accelerator: GPU

        • NVIDIA driver version: 535

        • CUDA version: 12.2

        • NVIDIA Container Toolkit with disabled CUDA-compat mounting

        al2-ami-sagemaker-inference-gpu-3-1
        • Accelerator: GPU

        • NVIDIA driver version: 550

        • CUDA version: 12.4

        • NVIDIA Container Toolkit with disabled CUDA-compat mounting

        al2-ami-sagemaker-inference-neuron-2
        • Accelerator: Inferentia2 and Trainium

        • Neuron driver version: 2.19

        " + }, + "CapacityReservationConfig":{ + "shape":"ProductionVariantCapacityReservationConfig", + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

        " } }, "documentation":"

        Identifies a model that you want to host and the resources chosen to deploy for hosting it. If you are deploying multiple models, tell SageMaker how to distribute traffic among the models by specifying variant weights. For more information on production variants, check Production variants.

        " @@ -33911,8 +34600,53 @@ "ml.eia2.xlarge" ] }, + "ProductionVariantCapacityReservationConfig":{ + "type":"structure", + "members":{ + "CapacityReservationPreference":{ + "shape":"CapacityReservationPreference", + "documentation":"

        Options that you can choose for the capacity reservation. SageMaker AI supports the following options:

        capacity-reservations-only

        SageMaker AI launches instances only into an ML capacity reservation. If no capacity is available, the instances fail to launch.

        " + }, + "MlReservationArn":{ + "shape":"MlReservationArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the ML capacity reservation that SageMaker AI applies when it deploys the endpoint.

        " + } + }, + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

        " + }, + "ProductionVariantCapacityReservationSummary":{ + "type":"structure", + "members":{ + "MlReservationArn":{ + "shape":"MlReservationArn", + "documentation":"

        The Amazon Resource Name (ARN) that uniquely identifies the ML capacity reservation that SageMaker AI applies when it deploys the endpoint.

        " + }, + "CapacityReservationPreference":{ + "shape":"CapacityReservationPreference", + "documentation":"

        The option that you chose for the capacity reservation. SageMaker AI supports the following options:

        capacity-reservations-only

        SageMaker AI launches instances only into an ML capacity reservation. If no capacity is available, the instances fail to launch.

        " + }, + "TotalInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that you allocated to the ML capacity reservation.

        " + }, + "AvailableInstanceCount":{ + "shape":"TaskCount", + "documentation":"

        The number of instances that are currently available in the ML capacity reservation.

        " + }, + "UsedByCurrentEndpoint":{ + "shape":"TaskCount", + "documentation":"

        The number of instances from the ML capacity reservation that are being used by the endpoint.

        " + }, + "Ec2CapacityReservations":{ + "shape":"Ec2CapacityReservationsList", + "documentation":"

        The EC2 capacity reservations that are shared to this ML capacity reservation, if any.

        " + } + }, + "documentation":"

        Details about an ML capacity reservation.

        " + }, "ProductionVariantContainerStartupHealthCheckTimeoutInSeconds":{ "type":"integer", + "box":true, "max":3600, "min":60 }, @@ -34166,7 +34900,46 @@ "ml.r7i.12xlarge", "ml.r7i.16xlarge", "ml.r7i.24xlarge", - "ml.r7i.48xlarge" + "ml.r7i.48xlarge", + "ml.c8g.medium", + "ml.c8g.large", + "ml.c8g.xlarge", + "ml.c8g.2xlarge", + "ml.c8g.4xlarge", + "ml.c8g.8xlarge", + "ml.c8g.12xlarge", + "ml.c8g.16xlarge", + "ml.c8g.24xlarge", + "ml.c8g.48xlarge", + "ml.r7gd.medium", + "ml.r7gd.large", + "ml.r7gd.xlarge", + "ml.r7gd.2xlarge", + "ml.r7gd.4xlarge", + "ml.r7gd.8xlarge", + "ml.r7gd.12xlarge", + "ml.r7gd.16xlarge", + "ml.m8g.medium", + "ml.m8g.large", + "ml.m8g.xlarge", + "ml.m8g.2xlarge", + "ml.m8g.4xlarge", + "ml.m8g.8xlarge", + "ml.m8g.12xlarge", + "ml.m8g.16xlarge", + "ml.m8g.24xlarge", + "ml.m8g.48xlarge", + "ml.c6in.large", + "ml.c6in.xlarge", + "ml.c6in.2xlarge", + "ml.c6in.4xlarge", + "ml.c6in.8xlarge", + "ml.c6in.12xlarge", + "ml.c6in.16xlarge", + "ml.c6in.24xlarge", + "ml.c6in.32xlarge", + "ml.p6-b200.48xlarge", + "ml.p6e-gb200.36xlarge" ] }, "ProductionVariantList":{ @@ -34195,6 +34968,7 @@ }, "ProductionVariantModelDataDownloadTimeoutInSeconds":{ "type":"integer", + "box":true, "max":3600, "min":60 }, @@ -34209,7 +34983,10 @@ }, "documentation":"

        Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.

        " }, - "ProductionVariantSSMAccess":{"type":"boolean"}, + "ProductionVariantSSMAccess":{ + "type":"boolean", + "box":true + }, "ProductionVariantServerlessConfig":{ "type":"structure", "required":[ @@ -34318,6 +35095,10 @@ "RoutingConfig":{ "shape":"ProductionVariantRoutingConfig", "documentation":"

        Settings that control how the endpoint routes incoming traffic to the instances that the endpoint hosts.

        " + }, + "CapacityReservationConfig":{ + "shape":"ProductionVariantCapacityReservationSummary", + "documentation":"

        Settings for the capacity reservation for the compute instances that SageMaker AI reserves for an endpoint.

        " } }, "documentation":"

        Describes weight and capacities for a production variant associated with an endpoint. If you sent a request to the UpdateEndpointWeightsAndCapacities API and the endpoint status is Updating, you get different desired and current values.

        " @@ -34329,6 +35110,7 @@ }, "ProductionVariantVolumeSizeInGB":{ "type":"integer", + "box":true, "max":512, "min":1 }, @@ -34349,7 +35131,8 @@ }, "DisableProfiler":{ "shape":"DisableProfiler", - "documentation":"

        Configuration to turn off Amazon SageMaker Debugger's system monitoring and profiling functionality. To turn it off, set to True.

        " + "documentation":"

        Configuration to turn off Amazon SageMaker Debugger's system monitoring and profiling functionality. To turn it off, set to True.

        ", + "box":true } }, "documentation":"

        Configuration information for Amazon SageMaker Debugger system monitoring, framework profiling, and storage paths.

        " @@ -34371,7 +35154,8 @@ }, "DisableProfiler":{ "shape":"DisableProfiler", - "documentation":"

        To turn off Amazon SageMaker Debugger monitoring and profiling while a training job is in progress, set to True.

        " + "documentation":"

        To turn off Amazon SageMaker Debugger monitoring and profiling while a training job is in progress, set to True.

        ", + "box":true } }, "documentation":"

        Configuration information for updating the Amazon SageMaker Debugger profile parameters, system and framework metrics configurations, and storage paths.

        " @@ -34405,7 +35189,8 @@ }, "VolumeSizeInGB":{ "shape":"OptionalVolumeSizeInGB", - "documentation":"

        The size, in GB, of the ML storage volume attached to the processing instance.

        " + "documentation":"

        The size, in GB, of the ML storage volume attached to the processing instance.

        ", + "box":true }, "RuleParameters":{ "shape":"RuleParameters", @@ -34452,7 +35237,10 @@ "max":20, "min":0 }, - "ProfilingIntervalInMilliseconds":{"type":"long"}, + "ProfilingIntervalInMilliseconds":{ + "type":"long", + "box":true + }, "ProfilingParameters":{ "type":"map", "key":{"shape":"ConfigKey"}, @@ -34471,7 +35259,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[a-zA-Z]+ ?\\d+\\.\\d+(\\.\\d+)?$" + "pattern":"[a-zA-Z]+ ?\\d+\\.\\d+(\\.\\d+)?" }, "Project":{ "type":"structure", @@ -34506,6 +35294,10 @@ "shape":"Timestamp", "documentation":"

        A timestamp specifying when the project was created.

        " }, + "TemplateProviderDetails":{ + "shape":"TemplateProviderDetailList", + "documentation":"

        An array of template providers associated with the project.

        " + }, "Tags":{ "shape":"TagList", "documentation":"

        An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.

        " @@ -34522,19 +35314,19 @@ "type":"string", "max":2048, "min":1, - "pattern":"^arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:project/[\\S]{1,2048}$" + "pattern":"arn:aws(-cn|-us-gov|-iso-f)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:project/[\\S]{1,2048}" }, "ProjectEntityName":{ "type":"string", "max":32, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,31}" }, "ProjectId":{ "type":"string", "max":20, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, "ProjectSortBy":{ "type":"string", @@ -34664,6 +35456,7 @@ "ProvisioningParameterValue":{ "type":"string", "max":4096, + "min":0, "pattern":".*" }, "ProvisioningParameters":{ @@ -34709,7 +35502,7 @@ }, "QProfileArn":{ "type":"string", - "pattern":"^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + "pattern":"arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}" }, "QualityCheckStepMetadata":{ "type":"structure", @@ -34748,11 +35541,13 @@ }, "SkipCheck":{ "shape":"Boolean", - "documentation":"

        This flag indicates if the drift check against the previous baseline will be skipped or not. If it is set to False, the previous baseline of the configured check type must be available.

        " + "documentation":"

        This flag indicates if the drift check against the previous baseline will be skipped or not. If it is set to False, the previous baseline of the configured check type must be available.

        ", + "box":true }, "RegisterNewBaseline":{ "shape":"Boolean", - "documentation":"

        This flag indicates if a newly calculated baseline can be accessed through step properties BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. If it is set to False, the previous baseline of the configured check type must also be available. These can be accessed through the BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics properties.

        " + "documentation":"

        This flag indicates if a newly calculated baseline can be accessed through step properties BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics. If it is set to False, the previous baseline of the configured check type must also be available. These can be accessed through the BaselineUsedForDriftCheckConstraints and BaselineUsedForDriftCheckStatistics properties.

        ", + "box":true } }, "documentation":"

        Container for the metadata for a Quality check step. For more information, see the topic on QualityCheck step in the Amazon SageMaker Developer Guide.

        " @@ -34793,10 +35588,12 @@ }, "QueryLineageMaxDepth":{ "type":"integer", + "box":true, "max":10 }, "QueryLineageMaxResults":{ "type":"integer", + "box":true, "max":50 }, "QueryLineageRequest":{ @@ -34812,7 +35609,8 @@ }, "IncludeEdges":{ "shape":"Boolean", - "documentation":"

        Setting this value to True retrieves not only the entities of interest but also the Associations and lineage entities on the path. Set to False to only return lineage entities that match your query.

        " + "documentation":"

        Setting this value to True retrieves not only the entities of interest but also the Associations and lineage entities on the path. Set to False to only return lineage entities that match your query.

        ", + "box":true }, "Filters":{ "shape":"QueryFilters", @@ -34858,18 +35656,21 @@ "QueryLineageTypes":{ "type":"list", "member":{"shape":"LineageType"}, - "max":4 + "max":4, + "min":0 }, "QueryProperties":{ "type":"map", "key":{"shape":"String256"}, "value":{"shape":"String256"}, - "max":5 + "max":5, + "min":0 }, "QueryTypes":{ "type":"list", "member":{"shape":"String40"}, - "max":5 + "max":5, + "min":0 }, "RSessionAppSettings":{ "type":"structure", @@ -34952,6 +35753,7 @@ }, "RandomSeed":{ "type":"integer", + "box":true, "min":0 }, "RealTimeInferenceConfig":{ @@ -35008,13 +35810,14 @@ "RecommendationJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:inference-recommendations-job/.*" }, "RecommendationJobCompilationJobName":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "RecommendationJobCompiledOutputConfig":{ "type":"structure", @@ -35080,7 +35883,8 @@ }, "RecommendationJobDescription":{ "type":"string", - "max":128 + "max":128, + "min":0 }, "RecommendationJobFrameworkVersion":{ "type":"string", @@ -35161,7 +35965,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, "RecommendationJobOutputConfig":{ "type":"structure", @@ -35223,7 +36027,8 @@ "members":{ "MaxInvocations":{ "shape":"Integer", - "documentation":"

        The maximum number of requests per minute expected for the endpoint.

        " + "documentation":"

        The maximum number of requests per minute expected for the endpoint.

        ", + "box":true }, "ModelLatencyThresholds":{ "shape":"ModelLatencyThresholds", @@ -35239,6 +36044,7 @@ "RecommendationJobSupportedContentType":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "RecommendationJobSupportedContentTypes":{ @@ -35259,7 +36065,8 @@ "RecommendationJobSupportedResponseMIMEType":{ "type":"string", "max":1024, - "pattern":"^[-\\w]+\\/.+$" + "min":0, + "pattern":"[-\\w]+\\/.+" }, "RecommendationJobSupportedResponseMIMETypes":{ "type":"list", @@ -35293,6 +36100,7 @@ "RecommendationJobVpcSecurityGroupId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "RecommendationJobVpcSecurityGroupIds":{ @@ -35304,6 +36112,7 @@ "RecommendationJobVpcSubnetId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "RecommendationJobVpcSubnets":{ @@ -35317,19 +36126,23 @@ "members":{ "CostPerHour":{ "shape":"Float", - "documentation":"

        Defines the cost per hour for the instance.

        " + "documentation":"

        Defines the cost per hour for the instance.

        ", + "box":true }, "CostPerInference":{ "shape":"Float", - "documentation":"

        Defines the cost per inference for the instance .

        " + "documentation":"

        Defines the cost per inference for the instance .

        ", + "box":true }, "MaxInvocations":{ "shape":"Integer", - "documentation":"

        The expected maximum number of requests per minute for the instance.

        " + "documentation":"

        The expected maximum number of requests per minute for the instance.

        ", + "box":true }, "ModelLatency":{ "shape":"Integer", - "documentation":"

        The expected model latency at maximum invocation per minute for the instance.

        " + "documentation":"

        The expected model latency at maximum invocation per minute for the instance.

        ", + "box":true }, "CpuUtilization":{ "shape":"UtilizationMetric", @@ -35450,7 +36263,7 @@ "type":"string", "max":14, "min":5, - "pattern":"^\\d{1,4}.\\d{1,4}.\\d{1,4}$" + "pattern":"\\d{1,4}.\\d{1,4}.\\d{1,4}" }, "RegionName":{ "type":"string", @@ -35623,7 +36436,8 @@ "RepositoryUrl":{ "type":"string", "max":1024, - "pattern":"^https://([.\\-_a-zA-Z0-9]+/?){3,1016}$" + "min":0, + "pattern":"https://([.\\-_a-zA-Z0-9]+/?){3,1016}" }, "ReservedCapacityArn":{ "type":"string", @@ -35633,18 +36447,20 @@ }, "ReservedCapacityDurationHours":{ "type":"long", + "box":true, "max":87600, "min":0 }, "ReservedCapacityDurationMinutes":{ "type":"long", + "box":true, "max":59, "min":0 }, "ReservedCapacityInstanceCount":{ "type":"integer", "max":256, - "min":1 + "min":0 }, "ReservedCapacityInstanceType":{ "type":"string", @@ -35654,7 +36470,8 @@ "ml.p5e.48xlarge", "ml.p5en.48xlarge", "ml.trn1.32xlarge", - "ml.trn2.48xlarge" + "ml.trn2.48xlarge", + "ml.p6-b200.48xlarge" ] }, "ReservedCapacityOffering":{ @@ -35670,7 +36487,8 @@ }, "InstanceCount":{ "shape":"ReservedCapacityInstanceCount", - "documentation":"

        The number of instances in the reserved capacity offering.

        " + "documentation":"

        The number of instances in the reserved capacity offering.

        ", + "box":true }, "AvailabilityZone":{ "shape":"AvailabilityZone", @@ -35780,6 +36598,7 @@ "ResourceArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z-]*:sagemaker:[a-z0-9-]*:[0-9]{12}:.+" }, "ResourceCatalog":{ @@ -35813,11 +36632,13 @@ "ResourceCatalogArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:sagemaker-catalog/.*" }, "ResourceCatalogDescription":{ "type":"string", - "max":256 + "max":256, + "min":0 }, "ResourceCatalogList":{ "type":"list", @@ -35845,15 +36666,17 @@ "members":{ "InstanceType":{ "shape":"TrainingInstanceType", - "documentation":"

        The ML compute instance type.

        SageMaker Training on Amazon Elastic Compute Cloud (EC2) P4de instances is in preview release starting December 9th, 2022.

        Amazon EC2 P4de instances (currently in preview) are powered by 8 NVIDIA A100 GPUs with 80GB high-performance HBM2e GPU memory, which accelerate the speed of training ML models that need to be trained on large datasets of high-resolution data. In this preview release, Amazon SageMaker supports ML training jobs on P4de instances (ml.p4de.24xlarge) to reduce model training time. The ml.p4de.24xlarge instances are available in the following Amazon Web Services Regions.

        • US East (N. Virginia) (us-east-1)

        • US West (Oregon) (us-west-2)

        To request quota limit increase and start using P4de instances, contact the SageMaker Training service team through your account team.

        " + "documentation":"

        The ML compute instance type.

        " }, "InstanceCount":{ "shape":"TrainingInstanceCount", - "documentation":"

        The number of ML compute instances to use. For distributed training, provide a value greater than 1.

        " + "documentation":"

        The number of ML compute instances to use. For distributed training, provide a value greater than 1.

        ", + "box":true }, "VolumeSizeInGB":{ "shape":"VolumeSizeInGB", - "documentation":"

        The size of the ML storage volume that you want to provision.

        ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

        When using an ML instance with NVMe SSD volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage. Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures storage paths for training datasets, checkpoints, model artifacts, and outputs to use the entire capacity of the instance storage. For example, ML instance families with the NVMe-type instance storage include ml.p4d, ml.g4dn, and ml.g5.

        When using an ML instance with the EBS-only storage option and without instance storage, you must define the size of EBS volume through VolumeSizeInGB in the ResourceConfig API. For example, ML instance families that use EBS volumes include ml.c5 and ml.p2.

        To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.

        To find the default local paths defined by the SageMaker training platform, see Amazon SageMaker Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and Outputs.

        " + "documentation":"

        The size of the ML storage volume that you want to provision.

        ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

        When using an ML instance with NVMe SSD volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage. Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures storage paths for training datasets, checkpoints, model artifacts, and outputs to use the entire capacity of the instance storage. For example, ML instance families with the NVMe-type instance storage include ml.p4d, ml.g4dn, and ml.g5.

        When using an ML instance with the EBS-only storage option and without instance storage, you must define the size of EBS volume through VolumeSizeInGB in the ResourceConfig API. For example, ML instance families that use EBS volumes include ml.c5 and ml.p2.

        To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.

        To find the default local paths defined by the SageMaker training platform, see Amazon SageMaker Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and Outputs.

        ", + "box":true }, "VolumeKmsKeyId":{ "shape":"KmsKeyId", @@ -35887,7 +36710,8 @@ }, "ResourceId":{ "type":"string", - "max":32 + "max":32, + "min":0 }, "ResourceInUse":{ "type":"structure", @@ -35915,7 +36739,8 @@ }, "MaxParallelTrainingJobs":{ "shape":"MaxParallelTrainingJobs", - "documentation":"

        The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.

        " + "documentation":"

        The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.

        ", + "box":true }, "MaxRuntimeInSeconds":{ "shape":"HyperParameterTuningMaxRuntimeInSeconds", @@ -35935,6 +36760,7 @@ "ResourcePolicyString":{ "type":"string", "max":20480, + "min":0, "pattern":".*(?:[ \\r\\n\\t].*)*" }, "ResourcePropertyName":{ @@ -35945,7 +36771,8 @@ }, "ResourceRetainedBillableTimeInSeconds":{ "type":"integer", - "documentation":"Optional. Indicates how many seconds the resource stayed in ResourceRetained state. Populated only after resource reaches ResourceReused or ResourceReleased state.", + "documentation":"

        Optional. Indicates how many seconds the resource stayed in ResourceRetained state. Populated only after resource reaches ResourceReused or ResourceReleased state.

        ", + "box":true, "min":0 }, "ResourceSharingConfig":{ @@ -36022,7 +36849,8 @@ "ResponseMIMEType":{ "type":"string", "max":1024, - "pattern":"^[-\\w]+\\/.+$" + "min":0, + "pattern":"[-\\w]+\\/.+" }, "ResponseMIMETypes":{ "type":"list", @@ -36082,7 +36910,8 @@ "members":{ "MaximumRetryAttempts":{ "shape":"MaximumRetryAttempts", - "documentation":"

        The number of times to retry the job. When the job is retried, it's SecondaryStatus is changed to STARTING.

        " + "documentation":"

        The number of times to retry the job. When the job is retried, it's SecondaryStatus is changed to STARTING.

        ", + "box":true } }, "documentation":"

        The retry strategy to use when a training job fails due to an InternalServerError. RetryStrategy is specified as part of the CreateTrainingJob and CreateHyperParameterTuningJob requests. You can add the StoppingCondition parameter to the request to limit the training time for the complete job.

        " @@ -36091,7 +36920,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + "pattern":"arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" }, "RollingDeploymentPolicy":{ "type":"structure", @@ -36188,7 +37017,7 @@ "members":{ "S3DataType":{ "shape":"S3DataType", - "documentation":"

        If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.

        If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

        If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

        " + "documentation":"

        If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.

        If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

        If you choose AugmentedManifestFile, S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. AugmentedManifestFile can only be used if the Channel's input mode is Pipe.

        If you choose Converse, S3Uri identifies an Amazon S3 location that contains data formatted according to Converse format. This format structures conversational messages with specific roles and content types used for training and fine-tuning foundational models.

        " }, "S3Uri":{ "shape":"S3Uri", @@ -36219,7 +37048,8 @@ "enum":[ "ManifestFile", "S3Prefix", - "AugmentedManifestFile" + "AugmentedManifestFile", + "Converse" ] }, "S3ModelDataSource":{ @@ -36275,12 +37105,14 @@ "S3ModelUri":{ "type":"string", "max":1024, - "pattern":"^(https|s3)://([^/]+)/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/]+)/?(.*)" }, "S3OutputPath":{ "type":"string", "max":1024, - "pattern":"^(https|s3)://([^/]+)/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/]+)/?(.*)" }, "S3Presign":{ "type":"structure", @@ -36314,7 +37146,8 @@ "S3Uri":{ "type":"string", "max":1024, - "pattern":"^(https|s3)://([^/]+)/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/]+)/?(.*)" }, "SageMakerImageName":{ "type":"string", @@ -36324,7 +37157,7 @@ "type":"string", "max":128, "min":1, - "pattern":"(?!^[.-])^([a-zA-Z0-9-_.]+)$" + "pattern":"(?!^[.-])^([a-zA-Z0-9-_.]+)" }, "SageMakerImageVersionAliases":{ "type":"list", @@ -36333,7 +37166,8 @@ "SageMakerPublicHubContentArn":{ "type":"string", "max":255, - "pattern":"^arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}$" + "min":0, + "pattern":"arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, "SageMakerResourceName":{ "type":"string", @@ -36358,10 +37192,11 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9_-]+$" + "pattern":"[a-zA-Z0-9_-]+" }, "SamplingPercentage":{ "type":"integer", + "box":true, "max":100, "min":0 }, @@ -36385,11 +37220,13 @@ "members":{ "InvocationsPerInstance":{ "shape":"Integer", - "documentation":"

        The number of invocations sent to a model, normalized by InstanceCount in each ProductionVariant. 1/numberOfInstances is sent as the value on each request, where numberOfInstances is the number of active instances for the ProductionVariant behind the endpoint at the time of the request.

        " + "documentation":"

        The number of invocations sent to a model, normalized by InstanceCount in each ProductionVariant. 1/numberOfInstances is sent as the value on each request, where numberOfInstances is the number of active instances for the ProductionVariant behind the endpoint at the time of the request.

        ", + "box":true }, "ModelLatency":{ "shape":"Integer", - "documentation":"

        The interval of time taken by a model to respond as viewed from SageMaker. This interval includes the local communication times taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.

        " + "documentation":"

        The interval of time taken by a model to respond as viewed from SageMaker. This interval includes the local communication times taken to send the request and to fetch the response from the container of a model and the time taken to complete the inference in the container.

        ", + "box":true } }, "documentation":"

        The metric for a scaling policy.

        " @@ -36399,11 +37236,13 @@ "members":{ "MinInvocationsPerMinute":{ "shape":"Integer", - "documentation":"

        The minimum number of expected requests to your endpoint per minute.

        " + "documentation":"

        The minimum number of expected requests to your endpoint per minute.

        ", + "box":true }, "MaxInvocationsPerMinute":{ "shape":"Integer", - "documentation":"

        The maximum number of expected requests to your endpoint per minute.

        " + "documentation":"

        The maximum number of expected requests to your endpoint per minute.

        ", + "box":true } }, "documentation":"

        An object where you specify the anticipated traffic pattern for an endpoint.

        " @@ -36490,7 +37329,8 @@ "Scope":{ "type":"string", "max":1024, - "pattern":"^[!#-\\[\\]-~]+( [!#-\\[\\]-~]+)*$" + "min":0, + "pattern":"[!#-\\[\\]-~]+( [!#-\\[\\]-~]+)*" }, "SearchExpression":{ "type":"structure", @@ -36591,8 +37431,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

        The maximum number of results to return.

        ", - "box":true + "documentation":"

        The maximum number of results to return.

        " }, "CrossAccountFilterOption":{ "shape":"CrossAccountFilterOption", @@ -36645,7 +37484,8 @@ }, "InstanceCount":{ "shape":"ReservedCapacityInstanceCount", - "documentation":"

        The number of instances you want to reserve in the training plan offerings. This allows you to specify the quantity of compute resources needed for your SageMaker training jobs or SageMaker HyperPod clusters, helping you find reserved capacity offerings that match your requirements.

        " + "documentation":"

        The number of instances you want to reserve in the training plan offerings. This allows you to specify the quantity of compute resources needed for your SageMaker training jobs or SageMaker HyperPod clusters, helping you find reserved capacity offerings that match your requirements.

        ", + "box":true }, "StartTimeAfter":{ "shape":"Timestamp", @@ -36736,12 +37576,14 @@ "SecurityGroupId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupId"}, - "max":5 + "max":5, + "min":0 }, "Seed":{"type":"long"}, "SelectedStep":{ @@ -36844,16 +37686,19 @@ }, "ServerlessMaxConcurrency":{ "type":"integer", + "box":true, "max":200, "min":1 }, "ServerlessMemorySizeInMB":{ "type":"integer", + "box":true, "max":6144, "min":1024 }, "ServerlessProvisionedConcurrency":{ "type":"integer", + "box":true, "max":200, "min":1 }, @@ -36861,7 +37706,7 @@ "type":"string", "max":100, "min":1, - "pattern":"^[a-zA-Z0-9_\\-]*" + "pattern":"[a-zA-Z0-9_\\-]*" }, "ServiceCatalogProvisionedProductDetails":{ "type":"structure", @@ -36926,6 +37771,7 @@ }, "SessionExpirationDurationInSeconds":{ "type":"integer", + "box":true, "max":43200, "min":1800 }, @@ -36960,7 +37806,8 @@ }, "SamplingPercentage":{ "shape":"Percentage", - "documentation":"

        The percentage of inference requests that Amazon SageMaker replicates from the production variant to the shadow variant.

        " + "documentation":"

        The percentage of inference requests that Amazon SageMaker replicates from the production variant to the shadow variant.

        ", + "box":true } }, "documentation":"

        The name and sampling percentage of a shadow variant.

        " @@ -37002,14 +37849,15 @@ "members":{ "Seed":{ "shape":"Seed", - "documentation":"

        Determines the shuffling order in ShuffleConfig value.

        " + "documentation":"

        Determines the shuffling order in ShuffleConfig value.

        ", + "box":true } }, "documentation":"

        A configuration for a shuffle option for input data in a channel. If you use S3Prefix for S3DataType, the results of the S3 key prefix matches are shuffled. If you use ManifestFile, the order of the S3 object references in the ManifestFile is shuffled. If you use AugmentedManifestFile, the order of the JSON lines in the AugmentedManifestFile is shuffled. The shuffling order is determined using the Seed value.

        For Pipe input mode, when ShuffleConfig is specified shuffling is done at the start of every epoch. With large datasets, this ensures that the order of the training data is different for each epoch, and it helps reduce bias and possible overfitting. In a multi-node training job when ShuffleConfig is combined with S3DataDistributionType of ShardedByS3Key, the data is shuffled across nodes so that the content sent to a particular node on the first epoch might be sent to a different node on the second epoch.

        " }, "SingleSignOnApplicationArn":{ "type":"string", - "pattern":"^arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso::[0-9]+:application\\/[a-zA-Z0-9-_.]+\\/apl-[a-zA-Z0-9]+$" + "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso::[0-9]+:application\\/[a-zA-Z0-9-_.]+\\/apl-[a-zA-Z0-9]+" }, "SingleSignOnUserIdentifier":{ "type":"string", @@ -37025,6 +37873,7 @@ "SnsTopicArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:aws[a-z\\-]*:sns:[a-z0-9\\-]*:[0-9]{12}:[a-zA-Z0-9_.-]+" }, "SortActionsBy":{ @@ -37198,7 +38047,8 @@ }, "SourceType":{ "type":"string", - "max":128 + "max":128, + "min":0 }, "SourceUri":{ "type":"string", @@ -37219,6 +38069,7 @@ "SpaceArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:space/.*" }, "SpaceCodeEditorAppSettings":{ @@ -37276,6 +38127,7 @@ }, "SpaceEbsVolumeSizeInGb":{ "type":"integer", + "box":true, "max":16384, "min":5 }, @@ -37311,7 +38163,8 @@ "SpaceName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "SpaceSettings":{ "type":"structure", @@ -37411,6 +38264,7 @@ }, "SpawnRate":{ "type":"integer", + "box":true, "min":0 }, "SplitType":{ @@ -37426,7 +38280,7 @@ "type":"string", "max":1024, "min":0, - "pattern":"^.{0,1024}$" + "pattern":".{0,1024}" }, "StageStatus":{ "type":"string", @@ -37595,6 +38449,7 @@ "StatusDetails":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "StatusMessage":{"type":"string"}, @@ -37613,7 +38468,8 @@ "StepName":{ "type":"string", "max":64, - "pattern":"^[A-Za-z0-9\\-_]*$" + "min":0, + "pattern":"[A-Za-z0-9\\-_]*" }, "StepStatus":{ "type":"string", @@ -37853,7 +38709,8 @@ "members":{ "MaxRuntimeInSeconds":{ "shape":"MaxRuntimeInSeconds", - "documentation":"

        The maximum length of time, in seconds, that a training or compilation job can run before it is stopped.

        For compilation jobs, if the job does not complete during this time, a TimeOut error is generated. We recommend starting with 900 seconds and increasing as necessary based on your model.

        For all other jobs, if the job does not complete during this time, SageMaker ends the job. When RetryStrategy is specified in the job request, MaxRuntimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt. The default value is 1 day. The maximum value is 28 days.

        The maximum time that a TrainingJob can run in total, including any time spent publishing metrics or archiving and uploading models after it has been stopped, is 30 days.

        " + "documentation":"

        The maximum length of time, in seconds, that a training or compilation job can run before it is stopped.

        For compilation jobs, if the job does not complete during this time, a TimeOut error is generated. We recommend starting with 900 seconds and increasing as necessary based on your model.

        For all other jobs, if the job does not complete during this time, SageMaker ends the job. When RetryStrategy is specified in the job request, MaxRuntimeInSeconds specifies the maximum time for all of the attempts in total, not each individual attempt. The default value is 1 day. The maximum value is 28 days.

        The maximum time that a TrainingJob can run in total, including any time spent publishing metrics or archiving and uploading models after it has been stopped, is 30 days.

        ", + "box":true }, "MaxWaitTimeInSeconds":{ "shape":"MaxWaitTimeInSeconds", @@ -37876,11 +38733,13 @@ "String":{"type":"string"}, "String1024":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "String128":{ "type":"string", - "max":128 + "max":128, + "min":0 }, "String200":{ "type":"string", @@ -37890,31 +38749,38 @@ }, "String2048":{ "type":"string", - "max":2048 + "max":2048, + "min":0 }, "String256":{ "type":"string", - "max":256 + "max":256, + "min":0 }, "String3072":{ "type":"string", - "max":3072 + "max":3072, + "min":0 }, "String40":{ "type":"string", - "max":40 + "max":40, + "min":0 }, "String64":{ "type":"string", - "max":64 + "max":64, + "min":0 }, "String8192":{ "type":"string", - "max":8192 + "max":8192, + "min":0 }, "StringParameterValue":{ "type":"string", "max":2500, + "min":0, "pattern":".*" }, "StudioLifecycleConfigAppType":{ @@ -37929,7 +38795,8 @@ "StudioLifecycleConfigArn":{ "type":"string", "max":256, - "pattern":"^(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/.*|None)$" + "min":0, + "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/.*|None)" }, "StudioLifecycleConfigContent":{ "type":"string", @@ -37966,7 +38833,8 @@ "StudioLifecycleConfigName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "StudioLifecycleConfigSortKey":{ "type":"string", @@ -38012,6 +38880,7 @@ "SubnetId":{ "type":"string", "max":32, + "min":0, "pattern":"[-0-9a-zA-Z]+" }, "Subnets":{ @@ -38095,7 +38964,8 @@ }, "GenerateCandidateDefinitionsOnly":{ "shape":"GenerateCandidateDefinitionsOnly", - "documentation":"

        Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

        " + "documentation":"

        Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.

        ", + "box":true }, "ProblemType":{ "shape":"ProblemType", @@ -38144,7 +39014,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TagKeyList":{ "type":"list", @@ -38169,7 +39039,7 @@ "type":"string", "max":256, "min":0, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TargetAttributeName":{ "type":"string", @@ -38222,7 +39092,10 @@ "max":256, "min":1 }, - "TargetObjectiveMetricValue":{"type":"float"}, + "TargetObjectiveMetricValue":{ + "type":"float", + "box":true + }, "TargetPlatform":{ "type":"structure", "required":[ @@ -38280,17 +39153,20 @@ }, "TargetValue":{ "shape":"Double", - "documentation":"

        The recommended target value to specify for the metric when creating a scaling policy.

        " + "documentation":"

        The recommended target value to specify for the metric when creating a scaling policy.

        ", + "box":true } }, "documentation":"

        A target tracking scaling policy. Includes support for predefined or customized metrics.

        When using the PutScalingPolicy API, this parameter is required when you are creating a policy with the policy type TargetTrackingScaling.

        " }, "TaskAvailabilityLifetimeInSeconds":{ "type":"integer", + "box":true, "min":60 }, "TaskCount":{ "type":"integer", + "box":true, "min":0 }, "TaskDescription":{ @@ -38309,7 +39185,7 @@ "type":"string", "max":30, "min":1, - "pattern":"^[A-Za-z0-9]+( [A-Za-z0-9]+)*$" + "pattern":"[A-Za-z0-9]+( [A-Za-z0-9]+)*" }, "TaskKeywords":{ "type":"list", @@ -38319,13 +39195,14 @@ }, "TaskTimeLimitInSeconds":{ "type":"integer", + "box":true, "min":30 }, "TaskTitle":{ "type":"string", "max":128, "min":1, - "pattern":"^[\\t\\n\\r -\\uD7FF\\uE000-\\uFFFD]*$" + "pattern":"[\\t\\n\\r -\\uD7FF\\uE000-\\uFFFD]*" }, "TemplateContent":{ "type":"string", @@ -38338,6 +39215,22 @@ "max":128000, "min":1 }, + "TemplateProviderDetail":{ + "type":"structure", + "members":{ + "CfnTemplateProviderDetail":{ + "shape":"CfnTemplateProviderDetail", + "documentation":"

        Details about a CloudFormation template provider configuration and associated provisioning information.

        " + } + }, + "documentation":"

        Details about a template provider configuration and associated provisioning information.

        " + }, + "TemplateProviderDetailList":{ + "type":"list", + "member":{"shape":"TemplateProviderDetail"}, + "max":1, + "min":1 + }, "TemplateUrl":{ "type":"string", "max":2048, @@ -38375,6 +39268,7 @@ }, "TerminationWaitInSeconds":{ "type":"integer", + "box":true, "max":3600, "min":0 }, @@ -38403,12 +39297,14 @@ "TextGenerationHyperParameterKey":{ "type":"string", "max":32, - "pattern":"^[a-zA-Z0-9._-]+$" + "min":0, + "pattern":"[a-zA-Z0-9._-]+" }, "TextGenerationHyperParameterValue":{ "type":"string", "max":16, - "pattern":"^[a-zA-Z0-9._-]+$" + "min":0, + "pattern":"[a-zA-Z0-9._-]+" }, "TextGenerationHyperParameters":{ "type":"map", @@ -38449,6 +39345,7 @@ "ThingName":{ "type":"string", "max":128, + "min":0, "pattern":"[a-zA-Z0-9:_-]+" }, "ThroughputConfig":{ @@ -38621,7 +39518,8 @@ "members":{ "Value":{ "shape":"Long", - "documentation":"

        The total number of matching results. This value may be exact or an estimate, depending on the Relation field.

        " + "documentation":"

        The total number of matching results. This value may be exact or an estimate, depending on the Relation field.

        ", + "box":true }, "Relation":{ "shape":"Relation", @@ -38632,18 +39530,28 @@ }, "TotalInstanceCount":{ "type":"integer", + "box":true, "min":0 }, "TrackingServerArn":{ "type":"string", "max":2048, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-tracking-server/.*" }, + "TrackingServerMaintenanceStatus":{ + "type":"string", + "enum":[ + "MaintenanceInProgress", + "MaintenanceComplete", + "MaintenanceFailed" + ] + }, "TrackingServerName":{ "type":"string", "max":256, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" }, "TrackingServerSize":{ "type":"string", @@ -38717,10 +39625,12 @@ }, "TrackingServerUrl":{ "type":"string", - "max":2048 + "max":2048, + "min":0 }, "TrafficDurationInSeconds":{ "type":"integer", + "box":true, "min":1 }, "TrafficPattern":{ @@ -38785,6 +39695,7 @@ "TrainingContainerArgument":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "TrainingContainerArguments":{ @@ -38802,22 +39713,26 @@ "TrainingContainerEntrypointString":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "TrainingEnvironmentKey":{ "type":"string", "max":512, + "min":0, "pattern":"[a-zA-Z_][a-zA-Z0-9_]*" }, "TrainingEnvironmentMap":{ "type":"map", "key":{"shape":"TrainingEnvironmentKey"}, "value":{"shape":"TrainingEnvironmentValue"}, - "max":100 + "max":100, + "min":0 }, "TrainingEnvironmentValue":{ "type":"string", "max":512, + "min":0, "pattern":"[\\S\\s]*" }, "TrainingImageConfig":{ @@ -38958,7 +39873,35 @@ "ml.r5.8xlarge", "ml.r5.12xlarge", "ml.r5.16xlarge", - "ml.r5.24xlarge" + "ml.r5.24xlarge", + "ml.p6-b200.48xlarge", + "ml.m7i.large", + "ml.m7i.xlarge", + "ml.m7i.2xlarge", + "ml.m7i.4xlarge", + "ml.m7i.8xlarge", + "ml.m7i.12xlarge", + "ml.m7i.16xlarge", + "ml.m7i.24xlarge", + "ml.m7i.48xlarge", + "ml.c7i.large", + "ml.c7i.xlarge", + "ml.c7i.2xlarge", + "ml.c7i.4xlarge", + "ml.c7i.8xlarge", + "ml.c7i.12xlarge", + "ml.c7i.16xlarge", + "ml.c7i.24xlarge", + 
"ml.c7i.48xlarge", + "ml.r7i.large", + "ml.r7i.xlarge", + "ml.r7i.2xlarge", + "ml.r7i.4xlarge", + "ml.r7i.8xlarge", + "ml.r7i.12xlarge", + "ml.r7i.16xlarge", + "ml.r7i.24xlarge", + "ml.r7i.48xlarge" ] }, "TrainingInstanceTypes":{ @@ -39062,15 +40005,18 @@ }, "EnableNetworkIsolation":{ "shape":"Boolean", - "documentation":"

        If the TrainingJob was created with network isolation, the value is set to true. If network isolation is enabled, nodes can't communicate beyond the VPC they run in.

        " + "documentation":"

        If the TrainingJob was created with network isolation, the value is set to true. If network isolation is enabled, nodes can't communicate beyond the VPC they run in.

        ", + "box":true }, "EnableInterContainerTrafficEncryption":{ "shape":"Boolean", - "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

        " + "documentation":"

        To encrypt all communications between ML compute instances in distributed training, choose True. Encryption provides greater security for distributed training, but training might take longer. How long it takes depends on the amount of communication between compute instances, especially if you use a deep learning algorithm in distributed training.

        ", + "box":true }, "EnableManagedSpotTraining":{ "shape":"Boolean", - "documentation":"

        When true, enables managed spot training using Amazon EC2 Spot instances to run training jobs instead of on-demand instances. For more information, see Managed Spot Training.

        " + "documentation":"

        When true, enables managed spot training using Amazon EC2 Spot instances to run training jobs instead of on-demand instances. For more information, see Managed Spot Training.

        ", + "box":true }, "CheckpointConfig":{"shape":"CheckpointConfig"}, "TrainingTimeInSeconds":{ @@ -39111,6 +40057,7 @@ "TrainingJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:training-job/.*" }, "TrainingJobDefinition":{ @@ -39158,7 +40105,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "TrainingJobSortByOptions":{ "type":"string", @@ -39188,23 +40135,28 @@ "members":{ "Completed":{ "shape":"TrainingJobStatusCounter", - "documentation":"

        The number of completed training jobs launched by the hyperparameter tuning job.

        " + "documentation":"

        The number of completed training jobs launched by the hyperparameter tuning job.

        ", + "box":true }, "InProgress":{ "shape":"TrainingJobStatusCounter", - "documentation":"

        The number of in-progress training jobs launched by a hyperparameter tuning job.

        " + "documentation":"

        The number of in-progress training jobs launched by a hyperparameter tuning job.

        ", + "box":true }, "RetryableError":{ "shape":"TrainingJobStatusCounter", - "documentation":"

        The number of training jobs that failed, but can be retried. A failed training job can be retried only if it failed because an internal service error occurred.

        " + "documentation":"

        The number of training jobs that failed, but can be retried. A failed training job can be retried only if it failed because an internal service error occurred.

        ", + "box":true }, "NonRetryableError":{ "shape":"TrainingJobStatusCounter", - "documentation":"

        The number of training jobs that failed and can't be retried. A failed training job can't be retried if it failed because a client error occurred.

        " + "documentation":"

        The number of training jobs that failed and can't be retried. A failed training job can't be retried if it failed because a client error occurred.

        ", + "box":true }, "Stopped":{ "shape":"TrainingJobStatusCounter", - "documentation":"

        The number of training jobs launched by a hyperparameter tuning job that were manually stopped.

        " + "documentation":"

        The number of training jobs launched by a hyperparameter tuning job that were manually stopped.

        ", + "box":true } }, "documentation":"

        The numbers of training jobs launched by a hyperparameter tuning job, categorized by status.

        " @@ -39283,16 +40235,19 @@ }, "TrainingPlanDurationHours":{ "type":"long", + "box":true, "max":87600, "min":0 }, "TrainingPlanDurationHoursInput":{ "type":"long", + "box":true, "max":87600, "min":1 }, "TrainingPlanDurationMinutes":{ "type":"long", + "box":true, "max":59, "min":0 }, @@ -39328,7 +40283,7 @@ "type":"string", "max":64, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}" }, "TrainingPlanOffering":{ "type":"structure", @@ -39380,7 +40335,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[a-z0-9\\-]+$" + "pattern":"[a-z0-9\\-]+" }, "TrainingPlanOfferings":{ "type":"list", @@ -39542,7 +40497,8 @@ }, "SupportsDistributedTraining":{ "shape":"Boolean", - "documentation":"

        Indicates whether the algorithm supports distributed training. If set to false, buyers can't request more than one instance during training.

        " + "documentation":"

        Indicates whether the algorithm supports distributed training. If set to false, buyers can't request more than one instance during training.

        ", + "box":true }, "MetricDefinitions":{ "shape":"MetricDefinitionList", @@ -39565,13 +40521,14 @@ }, "TrainingTimeInSeconds":{ "type":"integer", + "box":true, "min":1 }, "TransformAmiVersion":{ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$" + "pattern":"[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*" }, "TransformDataSource":{ "type":"structure", @@ -39587,17 +40544,20 @@ "TransformEnvironmentKey":{ "type":"string", "max":1024, + "min":0, "pattern":"[a-zA-Z_][a-zA-Z0-9_]{0,1023}" }, "TransformEnvironmentMap":{ "type":"map", "key":{"shape":"TransformEnvironmentKey"}, "value":{"shape":"TransformEnvironmentValue"}, - "max":16 + "max":16, + "min":0 }, "TransformEnvironmentValue":{ "type":"string", "max":10240, + "min":0, "pattern":"[\\S\\s]*" }, "TransformInput":{ @@ -39625,6 +40585,7 @@ }, "TransformInstanceCount":{ "type":"integer", + "box":true, "min":1 }, "TransformInstanceType":{ @@ -39813,6 +40774,7 @@ "TransformJobArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:transform-job/.*" }, "TransformJobDefinition":{ @@ -39858,7 +40820,7 @@ "type":"string", "max":63, "min":1, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "TransformJobStatus":{ "type":"string", @@ -40044,6 +41006,7 @@ "TrialArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:experiment-trial/.*" }, "TrialComponent":{ @@ -40130,6 +41093,7 @@ "TrialComponentArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:experiment-trial-component/.*" }, "TrialComponentArtifact":{ @@ -40150,27 +41114,32 @@ "TrialComponentArtifactValue":{ "type":"string", "max":2048, + "min":0, "pattern":".*" }, "TrialComponentArtifacts":{ "type":"map", "key":{"shape":"TrialComponentKey128"}, "value":{"shape":"TrialComponentArtifact"}, - "max":60 + "max":60, + "min":0 }, "TrialComponentKey128":{ 
"type":"string", "max":128, + "min":0, "pattern":".*" }, "TrialComponentKey256":{ "type":"string", "max":256, + "min":0, "pattern":".*" }, "TrialComponentKey320":{ "type":"string", "max":320, + "min":0, "pattern":".*" }, "TrialComponentMetricSummaries":{ @@ -40237,7 +41206,8 @@ "type":"map", "key":{"shape":"TrialComponentKey320"}, "value":{"shape":"TrialComponentParameterValue"}, - "max":300 + "max":300, + "min":0 }, "TrialComponentPrimaryStatus":{ "type":"string", @@ -40291,6 +41261,7 @@ "TrialComponentSourceArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:.*" }, "TrialComponentSourceDetail":{ @@ -40336,6 +41307,7 @@ "TrialComponentStatusMessage":{ "type":"string", "max":1024, + "min":0, "pattern":".*" }, "TrialComponentSummaries":{ @@ -40407,6 +41379,7 @@ "TrialSourceArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:.*" }, "TrialSummaries":{ @@ -40466,6 +41439,7 @@ }, "TtlDurationValue":{ "type":"integer", + "box":true, "min":1 }, "TuningJobCompletionCriteria":{ @@ -40501,15 +41475,18 @@ "members":{ "Dollars":{ "shape":"Dollars", - "documentation":"

        The whole number of dollars in the amount.

        " + "documentation":"

        The whole number of dollars in the amount.

        ", + "box":true }, "Cents":{ "shape":"Cents", - "documentation":"

        The fractional portion, in cents, of the amount.

        " + "documentation":"

        The fractional portion, in cents, of the amount.

        ", + "box":true }, "TenthFractionsOfACent":{ "shape":"TenthFractionsOfACent", - "documentation":"

        Fractions of a cent, in tenths.

        " + "documentation":"

        Fractions of a cent, in tenths.

        ", + "box":true } }, "documentation":"

        Represents an amount of money in United States dollars.

        " @@ -40555,20 +41532,21 @@ }, "Uid":{ "type":"long", + "box":true, "max":4000000, "min":10000 }, "UnifiedStudioDomainId":{ "type":"string", - "pattern":"^dzd[-_][a-zA-Z0-9_-]{1,36}$" + "pattern":"dzd[-_][a-zA-Z0-9_-]{1,36}" }, "UnifiedStudioEnvironmentId":{ "type":"string", - "pattern":"^[a-zA-Z0-9_-]{1,36}$" + "pattern":"[a-zA-Z0-9_-]{1,36}" }, "UnifiedStudioProjectId":{ "type":"string", - "pattern":"^[a-zA-Z0-9_-]{1,36}$" + "pattern":"[a-zA-Z0-9_-]{1,36}" }, "UnifiedStudioSettings":{ "type":"structure", @@ -40600,6 +41578,10 @@ "ProjectS3Path":{ "shape":"S3Uri", "documentation":"

        The location where Amazon S3 stores temporary execution data and other artifacts for the project that corresponds to the domain.

        " + }, + "SingleSignOnApplicationArn":{ + "shape":"SingleSignOnApplicationArn", + "documentation":"

        The ARN of the application managed by SageMaker AI and SageMaker Unified Studio in the Amazon Web Services IAM Identity Center.

        " } }, "documentation":"

        The settings that apply to an Amazon SageMaker AI domain when you use it in Amazon SageMaker Unified Studio.

        " @@ -40703,10 +41685,7 @@ }, "UpdateClusterRequest":{ "type":"structure", - "required":[ - "ClusterName", - "InstanceGroups" - ], + "required":["ClusterName"], "members":{ "ClusterName":{ "shape":"ClusterNameOrArn", @@ -40749,7 +41728,8 @@ }, "TargetVersion":{ "shape":"Integer", - "documentation":"

        Target version.

        " + "documentation":"

        Target version.

        ", + "box":true }, "SchedulerConfig":{ "shape":"SchedulerConfig", @@ -40774,7 +41754,8 @@ }, "ClusterSchedulerConfigVersion":{ "shape":"Integer", - "documentation":"

        Version of the cluster policy.

        " + "documentation":"

        Version of the cluster policy.

        ", + "box":true } } }, @@ -40860,7 +41841,8 @@ }, "TargetVersion":{ "shape":"Integer", - "documentation":"

        Target version.

        " + "documentation":"

        Target version.

        ", + "box":true }, "ComputeQuotaConfig":{ "shape":"ComputeQuotaConfig", @@ -40893,7 +41875,8 @@ }, "ComputeQuotaVersion":{ "shape":"Integer", - "documentation":"

        Version of the compute allocation definition.

        " + "documentation":"

        Version of the compute allocation definition.

        ", + "box":true } } }, @@ -41038,7 +42021,8 @@ }, "RetainAllVariantProperties":{ "shape":"Boolean", - "documentation":"

        When updating endpoint resources, enables or disables the retention of variant properties, such as the instance count or the variant weight. To retain the variant properties of an endpoint when updating it, set RetainAllVariantProperties to true. To use the variant properties specified in a new EndpointConfig call when updating an endpoint, set RetainAllVariantProperties to false. The default is false.

        " + "documentation":"

        When updating endpoint resources, enables or disables the retention of variant properties, such as the instance count or the variant weight. To retain the variant properties of an endpoint when updating it, set RetainAllVariantProperties to true. To use the variant properties specified in a new EndpointConfig call when updating an endpoint, set RetainAllVariantProperties to false. The default is false.

        ", + "box":true }, "ExcludeRetainedVariantProperties":{ "shape":"VariantPropertyList", @@ -41050,7 +42034,8 @@ }, "RetainDeploymentConfig":{ "shape":"Boolean", - "documentation":"

        Specifies whether to reuse the last deployment configuration. The default value is false (the configuration is not reused).

        " + "documentation":"

        Specifies whether to reuse the last deployment configuration. The default value is false (the configuration is not reused).

        ", + "box":true } } }, @@ -41396,7 +42381,8 @@ }, "Horovod":{ "shape":"Horovod", - "documentation":"

        Indicates Horovod compatibility.

        " + "documentation":"

        Indicates Horovod compatibility.

        ", + "box":true }, "ReleaseNotes":{ "shape":"ReleaseNotes", @@ -41530,7 +42516,8 @@ }, "AutomaticModelRegistration":{ "shape":"Boolean", - "documentation":"

        Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False

        " + "documentation":"

        Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False

        ", + "box":true }, "WeeklyMaintenanceWindowStart":{ "shape":"WeeklyMaintenanceWindowStart", @@ -41725,7 +42712,8 @@ }, "DisassociateLifecycleConfig":{ "shape":"DisassociateNotebookInstanceLifecycleConfig", - "documentation":"

        Set to true to remove the notebook instance lifecycle configuration currently associated with the notebook instance. This operation is idempotent. If you specify a lifecycle configuration that is not associated with the notebook instance when you call this method, it does not throw an error.

        " + "documentation":"

        Set to true to remove the notebook instance lifecycle configuration currently associated with the notebook instance. This operation is idempotent. If you specify a lifecycle configuration that is not associated with the notebook instance when you call this method, it does not throw an error.

        ", + "box":true }, "VolumeSizeInGB":{ "shape":"NotebookInstanceVolumeSizeInGB", @@ -41745,15 +42733,18 @@ }, "DisassociateAcceleratorTypes":{ "shape":"DisassociateNotebookInstanceAcceleratorTypes", - "documentation":"

        This parameter is no longer supported. Elastic Inference (EI) is no longer available.

        This parameter was used to specify a list of the EI instance types to remove from this notebook instance.

        " + "documentation":"

        This parameter is no longer supported. Elastic Inference (EI) is no longer available.

        This parameter was used to specify a list of the EI instance types to remove from this notebook instance.

        ", + "box":true }, "DisassociateDefaultCodeRepository":{ "shape":"DisassociateDefaultCodeRepository", - "documentation":"

        The name or URL of the default Git repository to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error.

        " + "documentation":"

        The name or URL of the default Git repository to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error.

        ", + "box":true }, "DisassociateAdditionalCodeRepositories":{ "shape":"DisassociateAdditionalCodeRepositories", - "documentation":"

        A list of names or URLs of the default Git repositories to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error.

        " + "documentation":"

        A list of names or URLs of the default Git repositories to remove from this notebook instance. This operation is idempotent. If you specify a Git repository that is not associated with the notebook instance when you call this method, it does not throw an error.

        ", + "box":true }, "RootAccess":{ "shape":"RootAccess", @@ -41929,6 +42920,10 @@ "Tags":{ "shape":"TagList", "documentation":"

        An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources. In addition, the project must have tag update constraints set in order to include this parameter in the request. For more information, see Amazon Web Services Service Catalog Tag Update Constraints.

        " + }, + "TemplateProvidersToUpdate":{ + "shape":"UpdateTemplateProviderList", + "documentation":"

        The template providers to update in the project.

        " } } }, @@ -41976,6 +42971,22 @@ } } }, + "UpdateTemplateProvider":{ + "type":"structure", + "members":{ + "CfnTemplateProvider":{ + "shape":"CfnUpdateTemplateProvider", + "documentation":"

        The CloudFormation template provider configuration to update.

        " + } + }, + "documentation":"

        Contains configuration details for updating an existing template provider in the project.

        " + }, + "UpdateTemplateProviderList":{ + "type":"list", + "member":{"shape":"UpdateTemplateProvider"}, + "max":1, + "min":1 + }, "UpdateTrainingJobRequest":{ "type":"structure", "required":["TrainingJobName"], @@ -42195,7 +43206,8 @@ "Url":{ "type":"string", "max":1024, - "pattern":"^(https|s3)://([^/]+)/?(.*)$" + "min":0, + "pattern":"(https|s3)://([^/]+)/?(.*)" }, "UserContext":{ "type":"structure", @@ -42222,6 +43234,7 @@ "UserProfileArn":{ "type":"string", "max":256, + "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:user-profile/.*" }, "UserProfileDetails":{ @@ -42257,7 +43270,8 @@ "UserProfileName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "UserProfileSortKey":{ "type":"string", @@ -42358,27 +43372,32 @@ }, "UsersPerStep":{ "type":"integer", + "box":true, "max":3, "min":1 }, "UtilizationMetric":{ "type":"float", + "box":true, "min":0.0 }, "UtilizationPercentagePerCore":{ "type":"integer", + "box":true, "max":100, "min":1 }, "ValidationFraction":{ "type":"float", + "box":true, "max":1, "min":0 }, "VariantName":{ "type":"string", "max":63, - "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" + "min":0, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, "VariantProperty":{ "type":"structure", @@ -42417,10 +43436,12 @@ }, "VariantStatusMessage":{ "type":"string", - "max":1024 + "max":1024, + "min":0 }, "VariantWeight":{ "type":"float", + "box":true, "min":0 }, "VectorConfig":{ @@ -42446,7 +43467,8 @@ "VersionAliasesList":{ "type":"list", "member":{"shape":"ImageVersionAliasPattern"}, - "max":20 + "max":20, + "min":0 }, "VersionId":{ "type":"string", @@ -42458,7 +43480,7 @@ "type":"string", "max":176, "min":1, - "pattern":"(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-]*\\/)?([a-zA-Z0-9]([a-zA-Z0-9-]){0,62})(? 
software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakera2iruntime AWS Java SDK :: Services :: SageMaker A2I Runtime diff --git a/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config b/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakera2iruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemakeredge/pom.xml b/services/sagemakeredge/pom.xml index 7ca96dd10176..0f65f530944b 100644 --- a/services/sagemakeredge/pom.xml +++ b/services/sagemakeredge/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakeredge AWS Java SDK :: Services :: Sagemaker Edge diff --git a/services/sagemakeredge/src/main/resources/codegen-resources/customization.config b/services/sagemakeredge/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakeredge/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakeredge/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemakerfeaturestoreruntime/pom.xml b/services/sagemakerfeaturestoreruntime/pom.xml index e7e7005441e4..a30776a25f38 100644 --- a/services/sagemakerfeaturestoreruntime/pom.xml +++ b/services/sagemakerfeaturestoreruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakerfeaturestoreruntime AWS Java SDK :: Services :: Sage Maker Feature Store 
Runtime diff --git a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/customization.config b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakerfeaturestoreruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemakergeospatial/pom.xml b/services/sagemakergeospatial/pom.xml index 0130bdaafaba..7f831454a9cd 100644 --- a/services/sagemakergeospatial/pom.xml +++ b/services/sagemakergeospatial/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakergeospatial AWS Java SDK :: Services :: Sage Maker Geospatial diff --git a/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config b/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakergeospatial/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemakermetrics/pom.xml b/services/sagemakermetrics/pom.xml index 38f9fce2c87c..45a673acc0dd 100644 --- a/services/sagemakermetrics/pom.xml +++ b/services/sagemakermetrics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakermetrics AWS Java SDK :: Services :: Sage Maker Metrics diff --git a/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config 
b/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakermetrics/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sagemakerruntime/pom.xml b/services/sagemakerruntime/pom.xml index 375f8c32b65f..6a5a9ebab99e 100644 --- a/services/sagemakerruntime/pom.xml +++ b/services/sagemakerruntime/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sagemakerruntime AWS Java SDK :: Services :: SageMaker Runtime diff --git a/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config b/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config +++ b/services/sagemakerruntime/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/savingsplans/pom.xml b/services/savingsplans/pom.xml index 8ce0be9860fc..56dc536a4ad5 100644 --- a/services/savingsplans/pom.xml +++ b/services/savingsplans/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT savingsplans AWS Java SDK :: Services :: Savingsplans diff --git a/services/savingsplans/src/main/resources/codegen-resources/customization.config b/services/savingsplans/src/main/resources/codegen-resources/customization.config index beae47f452ee..d7ed49e2a984 100644 --- a/services/savingsplans/src/main/resources/codegen-resources/customization.config +++ 
b/services/savingsplans/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "customServiceMetadata": { "contentType": "application/json" }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/scheduler/pom.xml b/services/scheduler/pom.xml index 45efff6a2581..d319a9eb7f93 100644 --- a/services/scheduler/pom.xml +++ b/services/scheduler/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT scheduler AWS Java SDK :: Services :: Scheduler diff --git a/services/scheduler/src/main/resources/codegen-resources/customization.config b/services/scheduler/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/scheduler/src/main/resources/codegen-resources/customization.config +++ b/services/scheduler/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/schemas/pom.xml b/services/schemas/pom.xml index 2fca05cfaf59..0a63693fd4ba 100644 --- a/services/schemas/pom.xml +++ b/services/schemas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT schemas AWS Java SDK :: Services :: Schemas diff --git a/services/schemas/src/main/resources/codegen-resources/customization.config b/services/schemas/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/schemas/src/main/resources/codegen-resources/customization.config +++ b/services/schemas/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/secretsmanager/pom.xml 
b/services/secretsmanager/pom.xml index 259c250d5dde..fea68f78f96b 100644 --- a/services/secretsmanager/pom.xml +++ b/services/secretsmanager/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT secretsmanager AWS Java SDK :: Services :: AWS Secrets Manager diff --git a/services/secretsmanager/src/main/resources/codegen-resources/customization.config b/services/secretsmanager/src/main/resources/codegen-resources/customization.config index f9f63f7232bc..55141f588dc7 100644 --- a/services/secretsmanager/src/main/resources/codegen-resources/customization.config +++ b/services/secretsmanager/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "getRandomPassword", "listSecrets" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/securityhub/pom.xml b/services/securityhub/pom.xml index e1651ed78468..e687e39d282c 100644 --- a/services/securityhub/pom.xml +++ b/services/securityhub/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT securityhub AWS Java SDK :: Services :: SecurityHub diff --git a/services/securityhub/src/main/resources/codegen-resources/customization.config b/services/securityhub/src/main/resources/codegen-resources/customization.config index cbad36f4c234..fdcbc9df329a 100644 --- a/services/securityhub/src/main/resources/codegen-resources/customization.config +++ b/services/securityhub/src/main/resources/codegen-resources/customization.config @@ -13,6 +13,5 @@ "getEnabledStandards", "getInsights" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/securityhub/src/main/resources/codegen-resources/paginators-1.json b/services/securityhub/src/main/resources/codegen-resources/paginators-1.json index 520c3b68e517..7d228336bdbb 100644 --- 
a/services/securityhub/src/main/resources/codegen-resources/paginators-1.json +++ b/services/securityhub/src/main/resources/codegen-resources/paginators-1.json @@ -12,6 +12,12 @@ "limit_key": "MaxResults", "result_key": "Products" }, + "DescribeProductsV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ProductsV2" + }, "DescribeStandards": { "input_token": "NextToken", "output_token": "NextToken", @@ -42,12 +48,30 @@ "limit_key": "MaxResults", "result_key": "Findings" }, + "GetFindingsV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Findings" + }, "GetInsights": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Insights" }, + "GetResourcesV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Resources" + }, + "ListAggregatorsV2": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "AggregatorsV2" + }, "ListConfigurationPolicies": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/services/securityhub/src/main/resources/codegen-resources/service-2.json b/services/securityhub/src/main/resources/codegen-resources/service-2.json index 6e295fd52b16..c8bf3a2018b3 100644 --- a/services/securityhub/src/main/resources/codegen-resources/service-2.json +++ b/services/securityhub/src/main/resources/codegen-resources/service-2.json @@ -216,7 +216,24 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidAccessException"} ], - "documentation":"

        Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.

        Updates from BatchUpdateFindings don't affect the value of UpdatedAt for a finding.

        Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects.

        • Confidence

        • Criticality

        • Note

        • RelatedFindings

        • Severity

        • Types

        • UserDefinedFields

        • VerificationState

        • Workflow

        You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide.

        " + "documentation":"

        Used by Security Hub customers to update information about their investigation into one or more findings. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. A member account can update findings only for their own account. Administrator and member accounts can use this operation to update the following fields and objects for one or more findings:

        • Confidence

        • Criticality

        • Note

        • RelatedFindings

        • Severity

        • Types

        • UserDefinedFields

        • VerificationState

        • Workflow

        If you use this operation to update a finding, your updates don’t affect the value for the UpdatedAt field of the finding. Also note that it can take several minutes for Security Hub to process your request and update each finding specified in the request.

        You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. For more information see Configuring access to BatchUpdateFindings in the Security Hub User Guide.

        " + }, + "BatchUpdateFindingsV2":{ + "name":"BatchUpdateFindingsV2", + "http":{ + "method":"PATCH", + "requestUri":"/findingsv2/batchupdatev2" + }, + "input":{"shape":"BatchUpdateFindingsV2Request"}, + "output":{"shape":"BatchUpdateFindingsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Used by customers to update information about their investigation into a finding. Requested by delegated administrator accounts or member accounts. Delegated administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. BatchUpdateFindings and BatchUpdateFindingV2 both use securityhub:BatchUpdateFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:BatchUpdateFindings action. Updates from BatchUpdateFindingsV2 don't affect the value of finding_info.modified_time, finding_info.modified_time_dt, time, time_dt for a finding. This API is in private preview and subject to change.

        " }, "BatchUpdateStandardsControlAssociations":{ "name":"BatchUpdateStandardsControlAssociations", @@ -235,6 +252,24 @@ ], "documentation":"

        For a batch of security controls and standards, this operation updates the enablement status of a control in a standard.

        " }, + "ConnectorRegistrationsV2":{ + "name":"ConnectorRegistrationsV2", + "http":{ + "method":"POST", + "requestUri":"/connectorsv2/registrations" + }, + "input":{"shape":"ConnectorRegistrationsV2Request"}, + "output":{"shape":"ConnectorRegistrationsV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to complete the authorization based on input parameters. This API is in preview release and subject to change.

        " + }, "CreateActionTarget":{ "name":"CreateActionTarget", "http":{ @@ -252,6 +287,24 @@ ], "documentation":"

        Creates a custom action target in Security Hub.

        You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

        " }, + "CreateAggregatorV2":{ + "name":"CreateAggregatorV2", + "http":{ + "method":"POST", + "requestUri":"/aggregatorv2/create" + }, + "input":{"shape":"CreateAggregatorV2Request"}, + "output":{"shape":"CreateAggregatorV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables aggregation across Amazon Web Services Regions. This API is in private preview and subject to change.

        " + }, "CreateAutomationRule":{ "name":"CreateAutomationRule", "http":{ @@ -269,6 +322,23 @@ ], "documentation":"

        Creates an automation rule based on input parameters.

        " }, + "CreateAutomationRuleV2":{ + "name":"CreateAutomationRuleV2", + "http":{ + "method":"POST", + "requestUri":"/automationrulesv2/create" + }, + "input":{"shape":"CreateAutomationRuleV2Request"}, + "output":{"shape":"CreateAutomationRuleV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Creates a V2 automation rule. This API is in private preview and subject to change.

        " + }, "CreateConfigurationPolicy":{ "name":"CreateConfigurationPolicy", "http":{ @@ -287,6 +357,24 @@ ], "documentation":"

        Creates a configuration policy with the defined configuration. Only the Security Hub delegated administrator can invoke this operation from the home Region.

        " }, + "CreateConnectorV2":{ + "name":"CreateConnectorV2", + "http":{ + "method":"POST", + "requestUri":"/connectorsv2" + }, + "input":{"shape":"CreateConnectorV2Request"}, + "output":{"shape":"CreateConnectorV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to create a connectorV2 based on input parameters. This API is in preview release and subject to change.

        " + }, "CreateFindingAggregator":{ "name":"CreateFindingAggregator", "http":{ @@ -339,6 +427,24 @@ ], "documentation":"

        Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account.

        CreateMembers is always used to add accounts that are not organization members.

        For accounts that are managed using Organizations, CreateMembers is only used in the following cases:

        • Security Hub is not configured to automatically add new organization accounts.

        • The account was disassociated or deleted in Security Hub.

        This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation.

        For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub.

        Accounts that are managed using Organizations don't receive an invitation. They automatically become a member account in Security Hub.

        • If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account.

        • For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls.

        A permissions policy is added that permits the administrator account to view the findings generated in the member account.

        To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

        " }, + "CreateTicketV2":{ + "name":"CreateTicketV2", + "http":{ + "method":"POST", + "requestUri":"/ticketsv2" + }, + "input":{"shape":"CreateTicketV2Request"}, + "output":{"shape":"CreateTicketV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to create a ticket in the chosen ITSM based on finding information for the provided finding metadata UID. This API is in preview release and subject to change.

        " + }, "DeclineInvitations":{ "name":"DeclineInvitations", "http":{ @@ -371,6 +477,42 @@ ], "documentation":"

        Deletes a custom action target from Security Hub.

        Deleting a custom action target does not affect any findings or insights that were already sent to Amazon CloudWatch Events using the custom action.

        " }, + "DeleteAggregatorV2":{ + "name":"DeleteAggregatorV2", + "http":{ + "method":"DELETE", + "requestUri":"/aggregatorv2/delete/{AggregatorV2Arn+}" + }, + "input":{"shape":"DeleteAggregatorV2Request"}, + "output":{"shape":"DeleteAggregatorV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes the Aggregator V2. This API is in private preview and subject to change.

        " + }, + "DeleteAutomationRuleV2":{ + "name":"DeleteAutomationRuleV2", + "http":{ + "method":"DELETE", + "requestUri":"/automationrulesv2/{Identifier}" + }, + "input":{"shape":"DeleteAutomationRuleV2Request"}, + "output":{"shape":"DeleteAutomationRuleV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes a V2 automation rule. This API is in private preview and subject to change.

        " + }, "DeleteConfigurationPolicy":{ "name":"DeleteConfigurationPolicy", "http":{ @@ -390,6 +532,24 @@ ], "documentation":"

        Deletes a configuration policy. Only the Security Hub delegated administrator can invoke this operation from the home Region. For the deletion to succeed, you must first disassociate a configuration policy from target accounts, organizational units, or the root by invoking the StartConfigurationPolicyDisassociation operation.

        " }, + "DeleteConnectorV2":{ + "name":"DeleteConnectorV2", + "http":{ + "method":"DELETE", + "requestUri":"/connectorsv2/{ConnectorId+}" + }, + "input":{"shape":"DeleteConnectorV2Request"}, + "output":{"shape":"DeleteConnectorV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to delete a connectorV2. This API is in preview release and subject to change.

        " + }, "DeleteFindingAggregator":{ "name":"DeleteFindingAggregator", "http":{ @@ -524,6 +684,39 @@ ], "documentation":"

        Returns information about product integrations in Security Hub.

        You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration.

        If you don't provide an integration ARN, then the results include all of the available product integrations.

        " }, + "DescribeProductsV2":{ + "name":"DescribeProductsV2", + "http":{ + "method":"GET", + "requestUri":"/productsV2" + }, + "input":{"shape":"DescribeProductsV2Request"}, + "output":{"shape":"DescribeProductsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Gets information about the product integration. This API is in private preview and subject to change.

        " + }, + "DescribeSecurityHubV2":{ + "name":"DescribeSecurityHubV2", + "http":{ + "method":"GET", + "requestUri":"/hubv2" + }, + "input":{"shape":"DescribeSecurityHubV2Request"}, + "output":{"shape":"DescribeSecurityHubV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"} + ], + "documentation":"

        Returns details about the service resource in your account. This API is in private preview and subject to change.

        " + }, "DescribeStandards":{ "name":"DescribeStandards", "http":{ @@ -606,6 +799,22 @@ ], "documentation":"

        Disables Security Hub in your account only in the current Amazon Web Services Region. To disable Security Hub in all Regions, you must submit one request per Region where you have enabled Security Hub.

        You can't disable Security Hub in an account that is currently the Security Hub administrator.

        When you disable Security Hub, your existing findings and insights and any Security Hub configuration settings are deleted after 90 days and cannot be recovered. Any standards that were enabled are disabled, and your administrator and member account associations are removed.

        If you want to save your existing findings, you must export them before you disable Security Hub.

        " }, + "DisableSecurityHubV2":{ + "name":"DisableSecurityHubV2", + "http":{ + "method":"DELETE", + "requestUri":"/hubv2" + }, + "input":{"shape":"DisableSecurityHubV2Request"}, + "output":{"shape":"DisableSecurityHubV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

        Disable the service for the current Amazon Web Services Region or specified Amazon Web Services Region. This API is in private preview and subject to change.

        " + }, "DisassociateFromAdministratorAccount":{ "name":"DisassociateFromAdministratorAccount", "http":{ @@ -711,6 +920,22 @@ ], "documentation":"

        Enables Security Hub for your account in the current Region or the Region you specify in the request.

        When you enable Security Hub, you grant to Security Hub the permissions necessary to gather findings from other services that are integrated with Security Hub.

        When you use the EnableSecurityHub operation to enable Security Hub, you also automatically enable the following standards:

        • Center for Internet Security (CIS) Amazon Web Services Foundations Benchmark v1.2.0

        • Amazon Web Services Foundational Security Best Practices

        Other standards are not automatically enabled.

        To opt out of automatically enabled standards, set EnableDefaultStandards to false.

        After you enable Security Hub, to enable a standard, use the BatchEnableStandards operation. To disable a standard, use the BatchDisableStandards operation.

        To learn more, see the setup information in the Security Hub User Guide.

        " }, + "EnableSecurityHubV2":{ + "name":"EnableSecurityHubV2", + "http":{ + "method":"POST", + "requestUri":"/hubv2" + }, + "input":{"shape":"EnableSecurityHubV2Request"}, + "output":{"shape":"EnableSecurityHubV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

        Enables the service in account for the current Amazon Web Services Region or specified Amazon Web Services Region. This API is in private preview and subject to change.

        " + }, "GetAdministratorAccount":{ "name":"GetAdministratorAccount", "http":{ @@ -728,6 +953,42 @@ ], "documentation":"

        Provides the details for the Security Hub administrator account for the current member account.

        Can be used by both member accounts that are managed using Organizations and accounts that were invited manually.

        " }, + "GetAggregatorV2":{ + "name":"GetAggregatorV2", + "http":{ + "method":"GET", + "requestUri":"/aggregatorv2/get/{AggregatorV2Arn+}" + }, + "input":{"shape":"GetAggregatorV2Request"}, + "output":{"shape":"GetAggregatorV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Returns the configuration of the specified Aggregator V2. This API is in private preview and subject to change.

        " + }, + "GetAutomationRuleV2":{ + "name":"GetAutomationRuleV2", + "http":{ + "method":"GET", + "requestUri":"/automationrulesv2/{Identifier}" + }, + "input":{"shape":"GetAutomationRuleV2Request"}, + "output":{"shape":"GetAutomationRuleV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Returns an automation rule for the V2 service. This API is in private preview and subject to change.

        " + }, "GetConfigurationPolicy":{ "name":"GetConfigurationPolicy", "http":{ @@ -764,6 +1025,24 @@ ], "documentation":"

        Returns the association between a configuration and a target account, organizational unit, or the root. The configuration can be a configuration policy or self-managed behavior. Only the Security Hub delegated administrator can invoke this operation from the home Region.

        " }, + "GetConnectorV2":{ + "name":"GetConnectorV2", + "http":{ + "method":"GET", + "requestUri":"/connectorsv2/{ConnectorId+}" + }, + "input":{"shape":"GetConnectorV2Request"}, + "output":{"shape":"GetConnectorV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to retrieve details for a connectorV2 based on connector id. This API is in preview release and subject to change.

        " + }, "GetEnabledStandards":{ "name":"GetEnabledStandards", "http":{ @@ -812,7 +1091,24 @@ {"shape":"InvalidAccessException"}, {"shape":"LimitExceededException"} ], - "documentation":"

        Returns history for a Security Hub finding in the last 90 days. The history includes changes made to any fields in the Amazon Web Services Security Finding Format (ASFF).

        " + "documentation":"

        Returns the history of a Security Hub finding for the past 90 days. The history includes changes made to any fields in the Amazon Web Services Security Finding Format (ASFF) except top-level timestamp fields, such as the CreatedAt and UpdatedAt fields.

        This operation might return fewer results than the maximum number of results (MaxResults) specified in a request, even when more results are available. If this occurs, the response includes a NextToken value, which you should use to retrieve the next set of results in the response. The presence of a NextToken value in a response doesn't necessarily indicate that the results are incomplete. However, you should continue to specify a NextToken value until you receive a response that doesn't include this value.

        " + }, + "GetFindingStatisticsV2":{ + "name":"GetFindingStatisticsV2", + "http":{ + "method":"POST", + "requestUri":"/findingsv2/statistics" + }, + "input":{"shape":"GetFindingStatisticsV2Request"}, + "output":{"shape":"GetFindingStatisticsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Returns aggregated statistical data about findings. GetFindingStatisticsV2 uses securityhub:GetAdhocInsightResults in the Action element of an IAM policy statement. You must have permission to perform the securityhub:GetAdhocInsightResults action. This API is in private preview and subject to change.

        " }, "GetFindings":{ "name":"GetFindings", @@ -830,6 +1126,23 @@ ], "documentation":"

        Returns a list of findings that match the specified criteria.

        If cross-Region aggregation is enabled, then when you call GetFindings from the home Region, the results include all of the matching findings from both the home Region and linked Regions.

        " }, + "GetFindingsV2":{ + "name":"GetFindingsV2", + "http":{ + "method":"POST", + "requestUri":"/findingsv2" + }, + "input":{"shape":"GetFindingsV2Request"}, + "output":{"shape":"GetFindingsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Return a list of findings that match the specified criteria. GetFindings and GetFindingsV2 both use securityhub:GetFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:GetFindings action. This API is in private preview and subject to change.

        " + }, "GetInsightResults":{ "name":"GetInsightResults", "http":{ @@ -916,6 +1229,42 @@ ], "documentation":"

        Returns the details for the Security Hub member accounts for the specified account IDs.

        An administrator account can be either the delegated Security Hub administrator account for an organization or an administrator account that enabled Security Hub manually.

        The results include both member accounts that are managed using Organizations and accounts that were invited manually.

        " }, + "GetResourcesStatisticsV2":{ + "name":"GetResourcesStatisticsV2", + "http":{ + "method":"POST", + "requestUri":"/resourcesv2/statistics" + }, + "input":{"shape":"GetResourcesStatisticsV2Request"}, + "output":{"shape":"GetResourcesStatisticsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Retrieves statistical information about Amazon Web Services resources and their associated security findings. This API is in private preview and subject to change.

        " + }, + "GetResourcesV2":{ + "name":"GetResourcesV2", + "http":{ + "method":"POST", + "requestUri":"/resourcesv2" + }, + "input":{"shape":"GetResourcesV2Request"}, + "output":{"shape":"GetResourcesV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Returns a list of resources. This API is in private preview and subject to change.

        " + }, "GetSecurityControlDefinition":{ "name":"GetSecurityControlDefinition", "http":{ @@ -950,6 +1299,24 @@ ], "documentation":"

        We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide.

        Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from.

        This operation is only used to invite accounts that don't belong to an Amazon Web Services organization. Organization accounts don't receive invitations.

        Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

        When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated in the member account.

        " }, + "ListAggregatorsV2":{ + "name":"ListAggregatorsV2", + "http":{ + "method":"GET", + "requestUri":"/aggregatorv2/list" + }, + "input":{"shape":"ListAggregatorsV2Request"}, + "output":{"shape":"ListAggregatorsV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Retrieves a list of V2 aggregators. This API is in private preview and subject to change.

        " + }, "ListAutomationRules":{ "name":"ListAutomationRules", "http":{ @@ -967,6 +1334,23 @@ ], "documentation":"

        A list of automation rules and their metadata for the calling account.

        " }, + "ListAutomationRulesV2":{ + "name":"ListAutomationRulesV2", + "http":{ + "method":"GET", + "requestUri":"/automationrulesv2/list" + }, + "input":{"shape":"ListAutomationRulesV2Request"}, + "output":{"shape":"ListAutomationRulesV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Returns a list of automation rules and metadata for the calling account. This API is in private preview and subject to change.

        " + }, "ListConfigurationPolicies":{ "name":"ListConfigurationPolicies", "http":{ @@ -1002,6 +1386,24 @@ ], "documentation":"

        Provides information about the associations for your configuration policies and self-managed behavior. Only the Security Hub delegated administrator can invoke this operation from the home Region.

        " }, + "ListConnectorsV2":{ + "name":"ListConnectorsV2", + "http":{ + "method":"GET", + "requestUri":"/connectorsv2" + }, + "input":{"shape":"ListConnectorsV2Request"}, + "output":{"shape":"ListConnectorsV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to retrieve a list of connectorsV2 and their metadata for the calling account. This API is in preview release and subject to change.

        " + }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", "http":{ @@ -1212,6 +1614,42 @@ ], "documentation":"

        Updates the name and description of a custom action target in Security Hub.

        " }, + "UpdateAggregatorV2":{ + "name":"UpdateAggregatorV2", + "http":{ + "method":"PATCH", + "requestUri":"/aggregatorv2/update/{AggregatorV2Arn+}" + }, + "input":{"shape":"UpdateAggregatorV2Request"}, + "output":{"shape":"UpdateAggregatorV2Response"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Updates the configuration for the Aggregator V2. This API is in private preview and subject to change.

        " + }, + "UpdateAutomationRuleV2":{ + "name":"UpdateAutomationRuleV2", + "http":{ + "method":"PATCH", + "requestUri":"/automationrulesv2/{Identifier}" + }, + "input":{"shape":"UpdateAutomationRuleV2Request"}, + "output":{"shape":"UpdateAutomationRuleV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

        Updates a V2 automation rule. This API is in private preview and subject to change.

        " + }, "UpdateConfigurationPolicy":{ "name":"UpdateConfigurationPolicy", "http":{ @@ -1231,6 +1669,24 @@ ], "documentation":"

        Updates a configuration policy. Only the Security Hub delegated administrator can invoke this operation from the home Region.

        " }, + "UpdateConnectorV2":{ + "name":"UpdateConnectorV2", + "http":{ + "method":"PATCH", + "requestUri":"/connectorsv2/{ConnectorId+}" + }, + "input":{"shape":"UpdateConnectorV2Request"}, + "output":{"shape":"UpdateConnectorV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

        Grants permission to update a connectorV2 based on its id and input parameters. This API is in preview release and subject to change.

        " + }, "UpdateFindingAggregator":{ "name":"UpdateFindingAggregator", "http":{ @@ -1379,8 +1835,7 @@ }, "AcceptAdministratorInvitationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AcceptInvitationRequest":{ "type":"structure", @@ -1401,8 +1856,7 @@ }, "AcceptInvitationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AccessDeniedException":{ "type":"structure", @@ -1690,6 +2144,27 @@ "max":10, "min":1 }, + "AggregatorV2":{ + "type":"structure", + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the aggregatorV2.

        " + } + }, + "documentation":"

        Specifies a cross-Region data aggregation configuration, including the aggregation Region and any linked Regions.

        " + }, + "AggregatorV2List":{ + "type":"list", + "member":{"shape":"AggregatorV2"} + }, + "AllowedOperators":{ + "type":"string", + "enum":[ + "AND", + "OR" + ] + }, "AlphaNumericNonEmptyString":{ "type":"string", "pattern":"^([^\\u0000-\\u007F]|[-_ a-zA-Z0-9])+$" @@ -1813,10 +2288,56 @@ }, "documentation":"

        One or more actions that Security Hub takes when a finding matches the defined criteria of a rule.

        " }, + "AutomationRulesActionListV2":{ + "type":"list", + "member":{"shape":"AutomationRulesActionV2"}, + "max":1, + "min":1 + }, "AutomationRulesActionType":{ "type":"string", "enum":["FINDING_FIELDS_UPDATE"] }, + "AutomationRulesActionTypeListV2":{ + "type":"list", + "member":{"shape":"AutomationRulesActionTypeObjectV2"} + }, + "AutomationRulesActionTypeObjectV2":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"AutomationRulesActionTypeV2", + "documentation":"

        The category of action to be executed by the automation rule.

        " + } + }, + "documentation":"

        Allows you to customize security response workflows.

        " + }, + "AutomationRulesActionTypeV2":{ + "type":"string", + "enum":[ + "FINDING_FIELDS_UPDATE", + "EXTERNAL_INTEGRATION" + ] + }, + "AutomationRulesActionV2":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"AutomationRulesActionTypeV2", + "documentation":"

        The category of action to be executed by the automation rule.

        " + }, + "FindingFieldsUpdate":{ + "shape":"AutomationRulesFindingFieldsUpdateV2", + "documentation":"

        The changes to be applied to fields in a security finding when an automation rule is triggered.

        " + }, + "ExternalIntegrationConfiguration":{ + "shape":"ExternalIntegrationConfiguration", + "documentation":"

        The settings for integrating automation rule actions with external systems or services.

        " + } + }, + "documentation":"

        Allows you to configure automated responses.

        " + }, "AutomationRulesArnsList":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -1910,6 +2431,24 @@ }, "documentation":"

        Identifies the finding fields that the automation rule action updates when a finding matches the defined criteria.

        " }, + "AutomationRulesFindingFieldsUpdateV2":{ + "type":"structure", + "members":{ + "SeverityId":{ + "shape":"Integer", + "documentation":"

        The severity level to be assigned to findings that match the automation rule criteria.

        " + }, + "Comment":{ + "shape":"NonEmptyString", + "documentation":"

        Notes or contextual information for findings that are modified by the automation rule.

        " + }, + "StatusId":{ + "shape":"Integer", + "documentation":"

        The status to be applied to findings that match automation rule criteria.

        " + } + }, + "documentation":"

        Allows you to define the structure for modifying specific fields in security findings.

        " + }, "AutomationRulesFindingFilters":{ "type":"structure", "members":{ @@ -2114,10 +2653,56 @@ "type":"list", "member":{"shape":"AutomationRulesMetadata"} }, - "AvailabilityZone":{ + "AutomationRulesMetadataListV2":{ + "type":"list", + "member":{"shape":"AutomationRulesMetadataV2"} + }, + "AutomationRulesMetadataV2":{ "type":"structure", "members":{ - "ZoneName":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the automation rule.

        " + }, + "RuleId":{ + "shape":"NonEmptyString", + "documentation":"

        The ID of the automation rule.

        " + }, + "RuleOrder":{ + "shape":"RuleOrderValueV2", + "documentation":"

        The value for the rule priority.

        " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the automation rule.

        " + }, + "RuleStatus":{ + "shape":"RuleStatusV2", + "documentation":"

        The status of the automation rule.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        An explanation for the purpose and functionality of the automation rule.

        " + }, + "Actions":{ + "shape":"AutomationRulesActionTypeListV2", + "documentation":"

        The list of actions to be performed when the rule criteria is met.

        " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp for when the automation rule was created.

        " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp for the most recent modification to the automation rule.

        " + } + }, + "documentation":"

        Includes essential metadata information about automation rules.

        " + }, + "AvailabilityZone":{ + "type":"structure", + "members":{ + "ZoneName":{ "shape":"NonEmptyString", "documentation":"

        The name of the Availability Zone.

        " }, @@ -15733,6 +16318,101 @@ "type":"list", "member":{"shape":"BatchUpdateFindingsUnprocessedFinding"} }, + "BatchUpdateFindingsV2ProcessedFinding":{ + "type":"structure", + "members":{ + "FindingIdentifier":{ + "shape":"OcsfFindingIdentifier", + "documentation":"

        The finding identifier of a processed finding.

        " + }, + "MetadataUid":{ + "shape":"NonEmptyString", + "documentation":"

        The metadata.uid of a processed finding.

        " + } + }, + "documentation":"

        The list of findings that were updated.

        " + }, + "BatchUpdateFindingsV2ProcessedFindingsList":{ + "type":"list", + "member":{"shape":"BatchUpdateFindingsV2ProcessedFinding"} + }, + "BatchUpdateFindingsV2Request":{ + "type":"structure", + "members":{ + "MetadataUids":{ + "shape":"MetadataUidList", + "documentation":"

        The list of finding metadata.uid to indicate findings to update. Finding metadata.uid is a globally unique identifier associated with the finding. Customers cannot use MetadataUids together with FindingIdentifiers.

        " + }, + "FindingIdentifiers":{ + "shape":"OcsfFindingIdentifierList", + "documentation":"

        Provides information to identify a specific V2 finding.

        " + }, + "Comment":{ + "shape":"NonEmptyString", + "documentation":"

        The updated value for a user provided comment about the finding. Minimum character length 1. Maximum character length 512.

        " + }, + "SeverityId":{ + "shape":"Integer", + "documentation":"

        The updated value for the normalized severity identifier. The severity ID is an integer with the allowed enum values [0, 1, 2, 3, 4, 5, 99]. When customer provides the updated severity ID, the string sibling severity will automatically be updated in the finding.

        " + }, + "StatusId":{ + "shape":"Integer", + "documentation":"

        The updated value for the normalized status identifier. The status ID is an integer with the allowed enum values [0, 1, 2, 3, 4, 5, 6, 99]. When customer provides the updated status ID, the string sibling status will automatically be updated in the finding.

        " + } + } + }, + "BatchUpdateFindingsV2Response":{ + "type":"structure", + "required":[ + "ProcessedFindings", + "UnprocessedFindings" + ], + "members":{ + "ProcessedFindings":{ + "shape":"BatchUpdateFindingsV2ProcessedFindingsList", + "documentation":"

        The list of findings that were updated successfully.

        " + }, + "UnprocessedFindings":{ + "shape":"BatchUpdateFindingsV2UnprocessedFindingsList", + "documentation":"

        The list of V2 findings that were not updated.

        " + } + } + }, + "BatchUpdateFindingsV2UnprocessedFinding":{ + "type":"structure", + "members":{ + "FindingIdentifier":{ + "shape":"OcsfFindingIdentifier", + "documentation":"

        The finding identifier of an unprocessed finding.

        " + }, + "MetadataUid":{ + "shape":"NonEmptyString", + "documentation":"

        The metadata.uid of an unprocessed finding.

        " + }, + "ErrorCode":{ + "shape":"BatchUpdateFindingsV2UnprocessedFindingErrorCode", + "documentation":"

        Indicates the specific type of error preventing successful processing of a finding during a batch update operation.

        " + }, + "ErrorMessage":{ + "shape":"NonEmptyString", + "documentation":"

        A detailed description of why a finding could not be processed during a batch update operation.

        " + } + }, + "documentation":"

        The list of findings that were not updated.

        " + }, + "BatchUpdateFindingsV2UnprocessedFindingErrorCode":{ + "type":"string", + "enum":[ + "ResourceNotFoundException", + "ValidationException", + "InternalServerException", + "ConflictException" + ] + }, + "BatchUpdateFindingsV2UnprocessedFindingsList":{ + "type":"list", + "member":{"shape":"BatchUpdateFindingsV2UnprocessedFinding"} + }, "BatchUpdateStandardsControlAssociationsRequest":{ "type":"structure", "required":["StandardsControlAssociationUpdates"], @@ -15883,6 +16563,12 @@ }, "documentation":"

        Provides details about the current status of the sensitive data detection.

        " }, + "ClientToken":{ + "type":"string", + "max":63, + "min":1, + "pattern":"^[\\x21-\\x7E]{1,64}$" + }, "CloudWatchLogsLogGroupArnConfigDetails":{ "type":"structure", "members":{ @@ -15962,6 +16648,40 @@ "NOT_AVAILABLE" ] }, + "CompositeFilter":{ + "type":"structure", + "members":{ + "StringFilters":{ + "shape":"OcsfStringFilterList", + "documentation":"

        Enables filtering based on string field values.

        " + }, + "DateFilters":{ + "shape":"OcsfDateFilterList", + "documentation":"

        Enables filtering based on date and timestamp fields.

        " + }, + "BooleanFilters":{ + "shape":"OcsfBooleanFilterList", + "documentation":"

        Enables filtering based on boolean field values.

        " + }, + "NumberFilters":{ + "shape":"OcsfNumberFilterList", + "documentation":"

        Enables filtering based on numerical field values.

        " + }, + "MapFilters":{ + "shape":"OcsfMapFilterList", + "documentation":"

        Enables filtering based on map field values.

        " + }, + "Operator":{ + "shape":"AllowedOperators", + "documentation":"

        The logical operator used to combine multiple filter conditions.

        " + } + }, + "documentation":"

        Enables the creation of filtering criteria for security findings.

        " + }, + "CompositeFilterList":{ + "type":"list", + "member":{"shape":"CompositeFilter"} + }, "ConfigurationOptions":{ "type":"structure", "members":{ @@ -16099,6 +16819,16 @@ "type":"list", "member":{"shape":"ConfigurationPolicySummary"} }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The request causes conflict with the current state of the service resource.

        ", + "error":{"httpStatusCode":409}, + "exception":true + }, "ConnectionDirection":{ "type":"string", "enum":[ @@ -16106,6 +16836,100 @@ "OUTBOUND" ] }, + "ConnectorAuthStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "FAILED" + ] + }, + "ConnectorProviderName":{ + "type":"string", + "enum":[ + "JIRA_CLOUD", + "SERVICENOW" + ] + }, + "ConnectorRegistrationsV2Request":{ + "type":"structure", + "required":[ + "AuthCode", + "AuthState" + ], + "members":{ + "AuthCode":{ + "shape":"NonEmptyString", + "documentation":"

        The authCode retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

        " + }, + "AuthState":{ + "shape":"NonEmptyString", + "documentation":"

        The authState retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

        " + } + } + }, + "ConnectorRegistrationsV2Response":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorArn":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Resource Name (ARN) of the connectorV2.

        " + }, + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        " + } + } + }, + "ConnectorStatus":{ + "type":"string", + "enum":[ + "CONNECTED", + "FAILED_TO_CONNECT", + "PENDING_CONFIGURATION", + "PENDING_AUTHORIZATION" + ] + }, + "ConnectorSummary":{ + "type":"structure", + "required":[ + "ConnectorId", + "Name", + "ProviderSummary", + "CreatedAt" + ], + "members":{ + "ConnectorArn":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Resource Name (ARN) of the connectorV2.

        " + }, + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        " + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

        The Name field contains the user-defined name assigned to the integration connector. This helps identify and manage multiple connectors within Security Hub.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        The description of the connectorV2.

        " + }, + "ProviderSummary":{ + "shape":"ProviderSummary", + "documentation":"

        The connectorV2 third party provider configuration summary.

        " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

        ISO 8601 UTC timestamp for when the connectorV2 was created.

        " + } + }, + "documentation":"

        A condensed overview of the connectorV2.

        " + }, + "ConnectorSummaryList":{ + "type":"list", + "member":{"shape":"ConnectorSummary"} + }, "ContainerDetails":{ "type":"structure", "members":{ @@ -16200,6 +17024,50 @@ } } }, + "CreateAggregatorV2Request":{ + "type":"structure", + "required":["RegionLinkingMode"], + "members":{ + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Determines how Regions are linked to an Aggregator V2.

        " + }, + "LinkedRegions":{ + "shape":"StringList", + "documentation":"

        The list of Regions that are linked to the aggregation Region.

        " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

        A list of key-value pairs to be applied to the AggregatorV2.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        A unique identifier used to ensure idempotency.

        ", + "idempotencyToken":true + } + } + }, + "CreateAggregatorV2Response":{ + "type":"structure", + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the AggregatorV2.

        " + }, + "AggregationRegion":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services Region where data is aggregated.

        " + }, + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Determines how Regions are linked to an Aggregator V2.

        " + }, + "LinkedRegions":{ + "shape":"StringList", + "documentation":"

        The list of Regions that are linked to the aggregation Region.

        " + } + } + }, "CreateAutomationRuleRequest":{ "type":"structure", "required":[ @@ -16253,6 +17121,64 @@ } } }, + "CreateAutomationRuleV2Request":{ + "type":"structure", + "required":[ + "RuleName", + "Description", + "RuleOrder", + "Criteria", + "Actions" + ], + "members":{ + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the V2 automation rule.

        " + }, + "RuleStatus":{ + "shape":"RuleStatusV2", + "documentation":"

        The status of the V2 automation rule.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        A description of the V2 automation rule.

        " + }, + "RuleOrder":{ + "shape":"RuleOrderValueV2", + "documentation":"

        The value for the rule priority.

        " + }, + "Criteria":{ + "shape":"Criteria", + "documentation":"

        The filtering type and configuration of the automation rule.

        " + }, + "Actions":{ + "shape":"AutomationRulesActionListV2", + "documentation":"

        A list of actions to be performed when the rule criteria is met.

        " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

        A list of key-value pairs associated with the V2 automation rule.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        A unique identifier used to ensure idempotency.

        ", + "idempotencyToken":true + } + } + }, + "CreateAutomationRuleV2Response":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the V2 automation rule.

        " + }, + "RuleId":{ + "shape":"NonEmptyString", + "documentation":"

        The ID of the V2 automation rule.

        " + } + } + }, "CreateConfigurationPolicyRequest":{ "type":"structure", "required":[ @@ -16311,26 +17237,78 @@ } } }, - "CreateFindingAggregatorRequest":{ + "CreateConnectorV2Request":{ "type":"structure", - "required":["RegionLinkingMode"], + "required":[ + "Name", + "Provider" + ], "members":{ - "RegionLinkingMode":{ + "Name":{ "shape":"NonEmptyString", - "documentation":"

        Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.

        The selected option also determines how to use the Regions provided in the Regions list.

        The options are as follows:

        • ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.

        • ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.

        • SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions.

        • NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions.

        " + "documentation":"

        The unique name of the connectorV2.

        " }, - "Regions":{ - "shape":"StringList", - "documentation":"

        If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region.

        If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region.

        An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS.

        " + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        The description of the connectorV2.

        " + }, + "Provider":{ + "shape":"ProviderConfiguration", + "documentation":"

        The third-party provider’s service configuration.

        " + }, + "KmsKeyArn":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Resource Name (ARN) of KMS key used to encrypt secrets for the connectorV2.

        " + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

        The tags to add to the connectorV2 when you create it.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        A unique identifier used to ensure idempotency.

        ", + "idempotencyToken":true } } }, - "CreateFindingAggregatorResponse":{ + "CreateConnectorV2Response":{ "type":"structure", + "required":["ConnectorId"], "members":{ - "FindingAggregatorArn":{ + "ConnectorArn":{ "shape":"NonEmptyString", - "documentation":"

        The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop cross-Region aggregation.

        " + "documentation":"

        The Amazon Resource Name (ARN) of the connectorV2.

        " + }, + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        " + }, + "AuthUrl":{ + "shape":"NonEmptyString", + "documentation":"

        The URL provided to customers for the OAuth auth code flow.

        " + } + } + }, + "CreateFindingAggregatorRequest":{ + "type":"structure", + "required":["RegionLinkingMode"], + "members":{ + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.

        The selected option also determines how to use the Regions provided in the Regions list.

        The options are as follows:

        • ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.

        • ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.

        • SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions.

        • NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions.

        " + }, + "Regions":{ + "shape":"StringList", + "documentation":"

        If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region.

        If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region.

        An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS.

        " + } + } + }, + "CreateFindingAggregatorResponse":{ + "type":"structure", + "members":{ + "FindingAggregatorArn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop cross-Region aggregation.

        " }, "FindingAggregationRegion":{ "shape":"NonEmptyString", @@ -16397,6 +17375,53 @@ } } }, + "CreateTicketV2Request":{ + "type":"structure", + "required":[ + "ConnectorId", + "FindingMetadataUid" + ], + "members":{ + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        " + }, + "FindingMetadataUid":{ + "shape":"NonEmptyString", + "documentation":"

        The unique ID for the finding.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        The client idempotency token.

        ", + "idempotencyToken":true + } + } + }, + "CreateTicketV2Response":{ + "type":"structure", + "required":["TicketId"], + "members":{ + "TicketId":{ + "shape":"NonEmptyString", + "documentation":"

        The ID for the ticketv2.

        " + }, + "TicketSrcUrl":{ + "shape":"NonEmptyString", + "documentation":"

        The url to the created ticket.

        " + } + } + }, + "Criteria":{ + "type":"structure", + "members":{ + "OcsfFindingCriteria":{ + "shape":"OcsfFindingFilters", + "documentation":"

        The filtering conditions that align with OCSF standards.

        " + } + }, + "documentation":"

        Defines the parameters and conditions used to evaluate and filter security findings.

        ", + "union":true + }, "CrossAccountMaxResults":{ "type":"integer", "max":50, @@ -16571,6 +17596,38 @@ } } }, + "DeleteAggregatorV2Request":{ + "type":"structure", + "required":["AggregatorV2Arn"], + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the Aggregator V2.

        ", + "location":"uri", + "locationName":"AggregatorV2Arn" + } + } + }, + "DeleteAggregatorV2Response":{ + "type":"structure", + "members":{} + }, + "DeleteAutomationRuleV2Request":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the V2 automation rule.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "DeleteAutomationRuleV2Response":{ + "type":"structure", + "members":{} + }, "DeleteConfigurationPolicyRequest":{ "type":"structure", "required":["Identifier"], @@ -16585,9 +17642,24 @@ }, "DeleteConfigurationPolicyResponse":{ "type":"structure", + "members":{} + }, + "DeleteConnectorV2Request":{ + "type":"structure", + "required":["ConnectorId"], "members":{ + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        ", + "location":"uri", + "locationName":"ConnectorId" + } } }, + "DeleteConnectorV2Response":{ + "type":"structure", + "members":{} + }, "DeleteFindingAggregatorRequest":{ "type":"structure", "required":["FindingAggregatorArn"], @@ -16602,8 +17674,7 @@ }, "DeleteFindingAggregatorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteInsightRequest":{ "type":"structure", @@ -16730,8 +17801,7 @@ }, "DescribeOrganizationConfigurationRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeOrganizationConfigurationResponse":{ "type":"structure", @@ -16788,6 +17858,54 @@ } } }, + "DescribeProductsV2Request":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token required for pagination. On your first call, set the value of this parameter to NULL. For subsequent calls, to continue listing data, set the value of this parameter to the value returned in the previous response.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return.

        ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "DescribeProductsV2Response":{ + "type":"structure", + "required":["ProductsV2"], + "members":{ + "ProductsV2":{ + "shape":"ProductsV2List", + "documentation":"

        Gets information about the product integration.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + } + } + }, + "DescribeSecurityHubV2Request":{ + "type":"structure", + "members":{} + }, + "DescribeSecurityHubV2Response":{ + "type":"structure", + "members":{ + "HubV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the service resource.

        " + }, + "SubscribedAt":{ + "shape":"NonEmptyString", + "documentation":"

        The date and time when the service was enabled in the account.

        " + } + } + }, "DescribeStandardsControlsRequest":{ "type":"structure", "required":["StandardsSubscriptionArn"], @@ -16879,8 +17997,7 @@ }, "DisableImportFindingsForProductResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisableOrganizationAdminAccountRequest":{ "type":"structure", @@ -16889,23 +18006,32 @@ "AdminAccountId":{ "shape":"NonEmptyString", "documentation":"

        The Amazon Web Services account identifier of the Security Hub administrator account.

        " + }, + "Feature":{ + "shape":"SecurityHubFeature", + "documentation":"

        The feature for which the delegated admin account is disabled. Defaults to Security Hub if not specified.

        " } } }, "DisableOrganizationAdminAccountResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisableSecurityHubRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisableSecurityHubResponse":{ "type":"structure", - "members":{ - } + "members":{} + }, + "DisableSecurityHubV2Request":{ + "type":"structure", + "members":{} + }, + "DisableSecurityHubV2Response":{ + "type":"structure", + "members":{} }, "DisabledSecurityControlIdentifierList":{ "type":"list", @@ -16913,23 +18039,19 @@ }, "DisassociateFromAdministratorAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateFromAdministratorAccountResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateFromMasterAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateFromMasterAccountResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateMembersRequest":{ "type":"structure", @@ -16943,8 +18065,7 @@ }, "DisassociateMembersResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DnsRequestAction":{ "type":"structure", @@ -17009,12 +18130,24 @@ "AdminAccountId":{ "shape":"NonEmptyString", "documentation":"

        The Amazon Web Services account identifier of the account to designate as the Security Hub administrator account.

        " + }, + "Feature":{ + "shape":"SecurityHubFeature", + "documentation":"

        The feature for which the delegated admin account is enabled. Defaults to Security Hub if not specified.

        " } } }, "EnableOrganizationAdminAccountResponse":{ "type":"structure", "members":{ + "AdminAccountId":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services account identifier of the account to designate as the Security Hub administrator account.

        " + }, + "Feature":{ + "shape":"SecurityHubFeature", + "documentation":"

        The feature where the delegated administrator is enabled. The default is Security Hub CSPM if no delegated administrator is specified in the request.

        " + } } }, "EnableSecurityHubRequest":{ @@ -17035,8 +18168,25 @@ } }, "EnableSecurityHubResponse":{ + "type":"structure", + "members":{} + }, + "EnableSecurityHubV2Request":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

        The tags to add to the hub V2 resource when you enable Security Hub.

        " + } + } + }, + "EnableSecurityHubV2Response":{ "type":"structure", "members":{ + "HubV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the V2 resource that was created.

        " + } } }, "EnabledSecurityControlIdentifierList":{ @@ -17079,6 +18229,16 @@ }, "documentation":"

        The options for customizing a security control parameter that is a list of enums.

        " }, + "ExternalIntegrationConfiguration":{ + "type":"structure", + "members":{ + "ConnectorArn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the connector that establishes the integration.

        " + } + }, + "documentation":"

        Defines the settings and parameters required for integrating external security tools and services.

        " + }, "FieldMap":{ "type":"map", "key":{"shape":"NonEmptyString"}, @@ -17348,8 +18508,7 @@ }, "GetAdministratorAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAdministratorAccountResponse":{ "type":"structure", @@ -17357,6 +18516,96 @@ "Administrator":{"shape":"Invitation"} } }, + "GetAggregatorV2Request":{ + "type":"structure", + "required":["AggregatorV2Arn"], + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the Aggregator V2.

        ", + "location":"uri", + "locationName":"AggregatorV2Arn" + } + } + }, + "GetAggregatorV2Response":{ + "type":"structure", + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the Aggregator V2.

        " + }, + "AggregationRegion":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services Region where data is aggregated.

        " + }, + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Determines how Regions are linked to an Aggregator V2.

        " + }, + "LinkedRegions":{ + "shape":"StringList", + "documentation":"

        The list of Regions that are linked to the aggregation Region.

        " + } + } + }, + "GetAutomationRuleV2Request":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the V2 automation rule.

        ", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "GetAutomationRuleV2Response":{ + "type":"structure", + "members":{ + "RuleArn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the V2 automation rule.

        " + }, + "RuleId":{ + "shape":"NonEmptyString", + "documentation":"

        The ID of the V2 automation rule.

        " + }, + "RuleOrder":{ + "shape":"RuleOrderValueV2", + "documentation":"

        The value for the rule priority.

        " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the V2 automation rule.

        " + }, + "RuleStatus":{ + "shape":"RuleStatusV2", + "documentation":"

        The status of the V2 automation rule.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        A description of the automation rule.

        " + }, + "Criteria":{ + "shape":"Criteria", + "documentation":"

        The filtering type and configuration of the V2 automation rule.

        " + }, + "Actions":{ + "shape":"AutomationRulesActionListV2", + "documentation":"

        A list of actions performed when the rule criteria is met.

        " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the V2 automation rule was created.

        " + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

        The timestamp when the V2 automation rule was updated.

        " + } + } + }, "GetConfigurationPolicyAssociationRequest":{ "type":"structure", "required":["Target"], @@ -17445,36 +18694,97 @@ } } }, - "GetEnabledStandardsRequest":{ + "GetConnectorV2Request":{ "type":"structure", + "required":["ConnectorId"], "members":{ - "StandardsSubscriptionArns":{ - "shape":"StandardsSubscriptionArns", - "documentation":"

        The list of the standards subscription ARNs for the standards to retrieve.

        " - }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

        The token that is required for pagination. On your first call to the GetEnabledStandards operation, set the value of this parameter to NULL.

        For subsequent calls to the operation, to continue listing data, set the value of this parameter to the value returned from the previous response.

        " - }, - "MaxResults":{ - "shape":"MaxResults", - "documentation":"

        The maximum number of results to return in the response.

        " + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        ", + "location":"uri", + "locationName":"ConnectorId" } } }, - "GetEnabledStandardsResponse":{ + "GetConnectorV2Response":{ "type":"structure", + "required":[ + "ConnectorId", + "Name", + "CreatedAt", + "LastUpdatedAt", + "Health", + "ProviderDetail" + ], "members":{ - "StandardsSubscriptions":{ - "shape":"StandardsSubscriptions", - "documentation":"

        The list of StandardsSubscriptions objects that include information about the enabled standards.

        " + "ConnectorArn":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Resource Name (ARN) of the connectorV2.

        " }, - "NextToken":{ - "shape":"NextToken", - "documentation":"

        The pagination token to use to request the next page of results.

        " - } - } - }, + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify connectorV2 resource.

        " + }, + "Name":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the connectorV2.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        The description of the connectorV2.

        " + }, + "KmsKeyArn":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Resource Name (ARN) of KMS key used for the connectorV2.

        " + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

        ISO 8601 UTC timestamp of when the connectorV2 was created.

        " + }, + "LastUpdatedAt":{ + "shape":"Timestamp", + "documentation":"

        ISO 8601 UTC timestamp of when the connectorV2 connectorStatus was last updated.

        " + }, + "Health":{ + "shape":"HealthCheck", + "documentation":"

        The current health status for the connectorV2.

        " + }, + "ProviderDetail":{ + "shape":"ProviderDetail", + "documentation":"

        The third-party provider detail for a service configuration.

        " + } + } + }, + "GetEnabledStandardsRequest":{ + "type":"structure", + "members":{ + "StandardsSubscriptionArns":{ + "shape":"StandardsSubscriptionArns", + "documentation":"

        The list of the standards subscription ARNs for the standards to retrieve.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token that is required for pagination. On your first call to the GetEnabledStandards operation, set the value of this parameter to NULL.

        For subsequent calls to the operation, to continue listing data, set the value of this parameter to the value returned from the previous response.

        " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return in the response.

        " + } + } + }, + "GetEnabledStandardsResponse":{ + "type":"structure", + "members":{ + "StandardsSubscriptions":{ + "shape":"StandardsSubscriptions", + "documentation":"

        The list of StandardsSubscriptions objects that include information about the enabled standards.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results.

        " + } + } + }, "GetFindingAggregatorRequest":{ "type":"structure", "required":["FindingAggregatorArn"], @@ -17544,6 +18854,33 @@ } } }, + "GetFindingStatisticsV2Request":{ + "type":"structure", + "required":["GroupByRules"], + "members":{ + "GroupByRules":{ + "shape":"GroupByRules", + "documentation":"

        Specifies how security findings should be aggregated and organized in the statistical analysis. It can accept up to 5 groupBy fields in a single call.

        " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

        Orders the aggregation count in descending or ascending order. Descending order is the default.

        " + }, + "MaxStatisticResults":{ + "shape":"MaxStatisticResults", + "documentation":"

        The maximum number of results to be returned.

        " + } + } + }, + "GetFindingStatisticsV2Response":{ + "type":"structure", + "members":{ + "GroupByResults":{ + "shape":"GroupByResults", + "documentation":"

        Aggregated statistics about security findings based on specified grouping criteria.

        " + } + } + }, "GetFindingsRequest":{ "type":"structure", "members":{ @@ -17579,6 +18916,40 @@ } } }, + "GetFindingsV2Request":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"OcsfFindingFilters", + "documentation":"

        The finding attributes used to define a condition to filter the returned OCSF findings. You can filter up to 10 composite filters. For each filter type inside of a composite filter, you can provide up to 20 filters.

        " + }, + "SortCriteria":{ + "shape":"SortCriteria", + "documentation":"

        The finding attributes used to sort the list of returned findings.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token required for pagination. On your first call, set the value of this parameter to NULL. For subsequent calls, to continue listing data, set the value of this parameter to the value returned in the previous response.

        " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return.

        " + } + } + }, + "GetFindingsV2Response":{ + "type":"structure", + "members":{ + "Findings":{ + "shape":"OcsfFindingsList", + "documentation":"

        An array of security findings returned by the operation.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + } + } + }, "GetInsightResultsRequest":{ "type":"structure", "required":["InsightArn"], @@ -17634,8 +19005,7 @@ }, "GetInvitationsCountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetInvitationsCountResponse":{ "type":"structure", @@ -17648,8 +19018,7 @@ }, "GetMasterAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetMasterAccountResponse":{ "type":"structure", @@ -17683,6 +19052,69 @@ } } }, + "GetResourcesStatisticsV2Request":{ + "type":"structure", + "required":["GroupByRules"], + "members":{ + "GroupByRules":{ + "shape":"ResourceGroupByRules", + "documentation":"

        How resource statistics should be aggregated and organized in the response.

        " + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

        Sorts aggregated statistics.

        " + }, + "MaxStatisticResults":{ + "shape":"MaxStatisticResults", + "documentation":"

        The maximum number of results to be returned.

        " + } + } + }, + "GetResourcesStatisticsV2Response":{ + "type":"structure", + "required":["GroupByResults"], + "members":{ + "GroupByResults":{ + "shape":"GroupByResults", + "documentation":"

        The aggregated statistics about resources based on the specified grouping rule.

        " + } + } + }, + "GetResourcesV2Request":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"ResourcesFilters", + "documentation":"

        Filters resources based on a set of criteria.

        " + }, + "SortCriteria":{ + "shape":"SortCriteria", + "documentation":"

        The finding attributes used to sort the list of returned findings.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token required for pagination. On your first call, set the value of this parameter to NULL. For subsequent calls, to continue listing data, set the value of this parameter to the value returned in the previous response.

        " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return.

        " + } + } + }, + "GetResourcesV2Response":{ + "type":"structure", + "required":["Resources"], + "members":{ + "Resources":{ + "shape":"Resources", + "documentation":"

        Filters resources based on a set of criteria.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + } + } + }, "GetSecurityControlDefinitionRequest":{ "type":"structure", "required":["SecurityControlId"], @@ -17702,6 +19134,105 @@ "SecurityControlDefinition":{"shape":"SecurityControlDefinition"} } }, + "GroupByField":{ + "type":"string", + "enum":[ + "activity_name", + "cloud.account.uid", + "cloud.provider", + "cloud.region", + "compliance.assessments.name", + "compliance.status", + "compliance.control", + "finding_info.title", + "finding_info.types", + "metadata.product.name", + "metadata.product.uid", + "resources.type", + "resources.uid", + "severity", + "status", + "vulnerabilities.fix_coverage", + "class_name" + ] + }, + "GroupByResult":{ + "type":"structure", + "members":{ + "GroupByField":{ + "shape":"NonEmptyString", + "documentation":"

        The attribute by which filtered security findings should be grouped.

        " + }, + "GroupByValues":{ + "shape":"GroupByValues", + "documentation":"

        An array of grouped values and their respective counts for each GroupByField.

        " + } + }, + "documentation":"

        Represents finding statistics grouped by GroupedByField.

        " + }, + "GroupByResults":{ + "type":"list", + "member":{"shape":"GroupByResult"} + }, + "GroupByRule":{ + "type":"structure", + "required":["GroupByField"], + "members":{ + "Filters":{ + "shape":"OcsfFindingFilters", + "documentation":"

        The criteria used to select which security findings should be included in the grouping operation.

        " + }, + "GroupByField":{ + "shape":"GroupByField", + "documentation":"

        The attribute by which filtered findings should be grouped.

        " + } + }, + "documentation":"

        Defines how the finding attribute should be grouped.

        " + }, + "GroupByRules":{ + "type":"list", + "member":{"shape":"GroupByRule"} + }, + "GroupByValue":{ + "type":"structure", + "members":{ + "FieldValue":{ + "shape":"NonEmptyString", + "documentation":"

        The value of the field by which findings are grouped.

        " + }, + "Count":{ + "shape":"Integer", + "documentation":"

        The number of findings for a specific FieldValue and GroupByField.

        " + } + }, + "documentation":"

        Represents individual aggregated results when grouping security findings for each GroupByField.

        " + }, + "GroupByValues":{ + "type":"list", + "member":{"shape":"GroupByValue"} + }, + "HealthCheck":{ + "type":"structure", + "required":[ + "ConnectorStatus", + "LastCheckedAt" + ], + "members":{ + "ConnectorStatus":{ + "shape":"ConnectorStatus", + "documentation":"

        The status of the connectorV2.

        " + }, + "Message":{ + "shape":"NonEmptyString", + "documentation":"

        The message for the reason of connectorStatus change.

        " + }, + "LastCheckedAt":{ + "shape":"Timestamp", + "documentation":"

        ISO 8601 UTC timestamp of when the health status of the connectorV2 was last checked.

        " + } + }, + "documentation":"

        Information about the operational status and health of a connectorV2.

        " + }, "IcmpTypeCode":{ "type":"structure", "members":{ @@ -17905,6 +19436,18 @@ "type":"list", "member":{"shape":"IntegrationType"} }, + "IntegrationV2Type":{ + "type":"string", + "enum":[ + "SEND_FINDINGS_TO_SECURITY_HUB", + "RECEIVE_FINDINGS_FROM_SECURITY_HUB", + "UPDATE_FINDINGS_IN_SECURITY_HUB" + ] + }, + "IntegrationV2TypeList":{ + "type":"list", + "member":{"shape":"IntegrationV2Type"} + }, "InternalException":{ "type":"structure", "members":{ @@ -17915,6 +19458,16 @@ "error":{"httpStatusCode":500}, "exception":true }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The request has failed due to an internal failure of the service.

        ", + "error":{"httpStatusCode":500}, + "exception":true + }, "InvalidAccessException":{ "type":"structure", "members":{ @@ -18038,6 +19591,53 @@ "type":"list", "member":{"shape":"Ipv6CidrBlockAssociation"} }, + "JiraCloudDetail":{ + "type":"structure", + "members":{ + "CloudId":{ + "shape":"NonEmptyString", + "documentation":"

        The cloud id of the Jira Cloud.

        " + }, + "ProjectKey":{ + "shape":"NonEmptyString", + "documentation":"

        The projectKey of Jira Cloud.

        " + }, + "Domain":{ + "shape":"NonEmptyString", + "documentation":"

        The URL domain of your Jira Cloud instance.

        " + }, + "AuthUrl":{ + "shape":"NonEmptyString", + "documentation":"

        The URL to provide to customers for OAuth auth code flow.

        " + }, + "AuthStatus":{ + "shape":"ConnectorAuthStatus", + "documentation":"

        The status of the authorization between Jira Cloud and the service.

        " + } + }, + "documentation":"

        Information about the configuration and status of a Jira Cloud integration.

        " + }, + "JiraCloudProviderConfiguration":{ + "type":"structure", + "members":{ + "ProjectKey":{ + "shape":"NonEmptyString", + "documentation":"

        The project key for a JiraCloud instance.

        " + } + }, + "documentation":"

        The initial configuration settings required to establish an integration between Security Hub and Jira Cloud.

        " + }, + "JiraCloudUpdateConfiguration":{ + "type":"structure", + "required":["ProjectKey"], + "members":{ + "ProjectKey":{ + "shape":"NonEmptyString", + "documentation":"

        The project key for a JiraCloud instance.

        " + } + }, + "documentation":"

        The parameters used to modify an existing Jira Cloud integration.

        " + }, "KeywordFilter":{ "type":"structure", "members":{ @@ -18062,6 +19662,36 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListAggregatorsV2Request":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token required for pagination. On your first call, set the value of this parameter to NULL. For subsequent calls, to continue listing data, set the value of this parameter to the value returned in the previous response.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return.

        ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListAggregatorsV2Response":{ + "type":"structure", + "members":{ + "AggregatorsV2":{ + "shape":"AggregatorV2List", + "documentation":"

        An array of aggregators.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + } + } + }, "ListAutomationRulesRequest":{ "type":"structure", "members":{ @@ -18092,6 +19722,36 @@ } } }, + "ListAutomationRulesV2Request":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The token required for pagination. On your first call, set the value of this parameter to NULL. For subsequent calls, to continue listing data, set the value of this parameter to the value returned in the previous response.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to return.

        ", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListAutomationRulesV2Response":{ + "type":"structure", + "members":{ + "Rules":{ + "shape":"AutomationRulesMetadataListV2", + "documentation":"

        An array of automation rules.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + } + } + }, "ListConfigurationPoliciesRequest":{ "type":"structure", "members":{ @@ -18152,6 +19812,49 @@ } } }, + "ListConnectorsV2Request":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token per the Amazon Web Services Pagination standard.

        ", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        The maximum number of results to be returned.

        ", + "location":"querystring", + "locationName":"MaxResults" + }, + "ProviderName":{ + "shape":"ConnectorProviderName", + "documentation":"

        The name of the third-party provider.

        ", + "location":"querystring", + "locationName":"ProviderName" + }, + "ConnectorStatus":{ + "shape":"ConnectorStatus", + "documentation":"

        The status for the connectorV2.

        ", + "location":"querystring", + "locationName":"ConnectorStatus" + } + } + }, + "ListConnectorsV2Response":{ + "type":"structure", + "required":["Connectors"], + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

        The pagination token to use to request the next page of results. Otherwise, this parameter is null.

        " + }, + "Connectors":{ + "shape":"ConnectorSummaryList", + "documentation":"

        An array of connectorV2 summaries.

        " + } + } + }, "ListEnabledProductsForImportRequest":{ "type":"structure", "members":{ @@ -18292,6 +19995,12 @@ "documentation":"

        The token that is required for pagination. On your first call to the ListOrganizationAdminAccounts operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set the value of this parameter to the value returned from the previous response.

        ", "location":"querystring", "locationName":"NextToken" + }, + "Feature":{ + "shape":"SecurityHubFeature", + "documentation":"

        The feature where the delegated administrator account is listed. Defaults to Security Hub if not specified.

        ", + "location":"querystring", + "locationName":"Feature" } } }, @@ -18305,6 +20014,10 @@ "NextToken":{ "shape":"NextToken", "documentation":"

        The pagination token to use to request the next page of results.

        " + }, + "Feature":{ + "shape":"SecurityHubFeature", + "documentation":"

        The feature where the delegated administrator account is listed. Defaults to Security Hub CSPM if not specified.

        " } } }, @@ -18510,6 +20223,11 @@ "max":100, "min":1 }, + "MaxStatisticResults":{ + "type":"integer", + "max":400, + "min":1 + }, "Member":{ "type":"structure", "members":{ @@ -18550,6 +20268,12 @@ "type":"list", "member":{"shape":"Member"} }, + "MetadataUidList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":0 + }, "Network":{ "type":"structure", "members":{ @@ -18835,61 +20559,257 @@ }, "documentation":"

        The updated note.

        " }, - "NumberFilter":{ + "NumberFilter":{ + "type":"structure", + "members":{ + "Gte":{ + "shape":"Double", + "documentation":"

        The greater-than-equal condition to be applied to a single field when querying for findings.

        " + }, + "Lte":{ + "shape":"Double", + "documentation":"

        The less-than-equal condition to be applied to a single field when querying for findings.

        " + }, + "Eq":{ + "shape":"Double", + "documentation":"

        The equal-to condition to be applied to a single field when querying for findings.

        " + }, + "Gt":{ + "shape":"Double", + "documentation":"

        The greater-than condition to be applied to a single field when querying for findings.

        " + }, + "Lt":{ + "shape":"Double", + "documentation":"

        The less-than condition to be applied to a single field when querying for findings.

        " + } + }, + "documentation":"

        A number filter for querying findings.

        " + }, + "NumberFilterList":{ + "type":"list", + "member":{"shape":"NumberFilter"} + }, + "Occurrences":{ + "type":"structure", + "members":{ + "LineRanges":{ + "shape":"Ranges", + "documentation":"

        Occurrences of sensitive data detected in a non-binary text file or a Microsoft Word file. Non-binary text files include files such as HTML, XML, JSON, and TXT files.

        " + }, + "OffsetRanges":{ + "shape":"Ranges", + "documentation":"

        Occurrences of sensitive data detected in a binary text file.

        " + }, + "Pages":{ + "shape":"Pages", + "documentation":"

        Occurrences of sensitive data in an Adobe Portable Document Format (PDF) file.

        " + }, + "Records":{ + "shape":"Records", + "documentation":"

        Occurrences of sensitive data in an Apache Avro object container or an Apache Parquet file.

        " + }, + "Cells":{ + "shape":"Cells", + "documentation":"

        Occurrences of sensitive data detected in Microsoft Excel workbooks, comma-separated value (CSV) files, or tab-separated value (TSV) files.

        " + } + }, + "documentation":"

        The detected occurrences of sensitive data.

        " + }, + "OcsfBooleanField":{ + "type":"string", + "enum":[ + "compliance.assessments.meets_criteria", + "vulnerabilities.is_exploit_available", + "vulnerabilities.is_fix_available" + ] + }, + "OcsfBooleanFilter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"OcsfBooleanField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"BooleanFilter"} + }, + "documentation":"

        Enables filtering of security findings based on boolean field values in OCSF.

        " + }, + "OcsfBooleanFilterList":{ + "type":"list", + "member":{"shape":"OcsfBooleanFilter"} + }, + "OcsfDateField":{ + "type":"string", + "enum":[ + "finding_info.created_time_dt", + "finding_info.first_seen_time_dt", + "finding_info.last_seen_time_dt", + "finding_info.modified_time_dt" + ] + }, + "OcsfDateFilter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"OcsfDateField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"DateFilter"} + }, + "documentation":"

        Enables filtering of security findings based on date and timestamp fields in OCSF.

        " + }, + "OcsfDateFilterList":{ + "type":"list", + "member":{"shape":"OcsfDateFilter"} + }, + "OcsfFinding":{ + "type":"structure", + "members":{}, + "document":true + }, + "OcsfFindingFilters":{ + "type":"structure", + "members":{ + "CompositeFilters":{ + "shape":"CompositeFilterList", + "documentation":"

        Enables the creation of complex filtering conditions by combining filter criteria.

        " + }, + "CompositeOperator":{ + "shape":"AllowedOperators", + "documentation":"

        The logical operators used to combine the filtering on multiple CompositeFilters.

        " + } + }, + "documentation":"

        Specifies the filtering criteria for security findings using OCSF.

        " + }, + "OcsfFindingIdentifier":{ + "type":"structure", + "required":[ + "CloudAccountUid", + "FindingInfoUid", + "MetadataProductUid" + ], + "members":{ + "CloudAccountUid":{ + "shape":"NonEmptyString", + "documentation":"

        Finding cloud.account.uid, which is a unique identifier in the Amazon Web Services account.

        " + }, + "FindingInfoUid":{ + "shape":"NonEmptyString", + "documentation":"

        Finding finding_info.uid, which is a unique identifier for the finding from the finding provider.

        " + }, + "MetadataProductUid":{ + "shape":"NonEmptyString", + "documentation":"

        Finding metadata.product.uid, which is a unique identifier for the product.

        " + } + }, + "documentation":"

        Provides a standard to identify security findings using OCSF.

        " + }, + "OcsfFindingIdentifierList":{ + "type":"list", + "member":{"shape":"OcsfFindingIdentifier"}, + "max":100, + "min":0 + }, + "OcsfFindingsList":{ + "type":"list", + "member":{"shape":"OcsfFinding"} + }, + "OcsfMapField":{ + "type":"string", + "enum":["resources.tags"] + }, + "OcsfMapFilter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"OcsfMapField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"MapFilter"} + }, + "documentation":"

        Enables filtering of security findings based on map field values in OCSF.

        " + }, + "OcsfMapFilterList":{ + "type":"list", + "member":{"shape":"OcsfMapFilter"} + }, + "OcsfNumberField":{ + "type":"string", + "enum":[ + "activity_id", + "compliance.status_id", + "confidence_score", + "severity_id", + "status_id", + "finding_info.related_events_count" + ] + }, + "OcsfNumberFilter":{ "type":"structure", "members":{ - "Gte":{ - "shape":"Double", - "documentation":"

        The greater-than-equal condition to be applied to a single field when querying for findings.

        " - }, - "Lte":{ - "shape":"Double", - "documentation":"

        The less-than-equal condition to be applied to a single field when querying for findings.

        " - }, - "Eq":{ - "shape":"Double", - "documentation":"

        The equal-to condition to be applied to a single field when querying for findings.

        " - }, - "Gt":{ - "shape":"Double", - "documentation":"

        The greater-than condition to be applied to a single field when querying for findings.

        " + "FieldName":{ + "shape":"OcsfNumberField", + "documentation":"

        The name of the field.

        " }, - "Lt":{ - "shape":"Double", - "documentation":"

        The less-than condition to be applied to a single field when querying for findings.

        " - } + "Filter":{"shape":"NumberFilter"} }, - "documentation":"

        A number filter for querying findings.

        " + "documentation":"

        Enables filtering of security findings based on numerical field values in OCSF.

        " }, - "NumberFilterList":{ + "OcsfNumberFilterList":{ "type":"list", - "member":{"shape":"NumberFilter"} + "member":{"shape":"OcsfNumberFilter"} }, - "Occurrences":{ + "OcsfStringField":{ + "type":"string", + "enum":[ + "metadata.uid", + "activity_name", + "cloud.account.uid", + "cloud.provider", + "cloud.region", + "compliance.assessments.category", + "compliance.assessments.name", + "compliance.control", + "compliance.status", + "compliance.standards", + "finding_info.desc", + "finding_info.src_url", + "finding_info.title", + "finding_info.types", + "finding_info.uid", + "finding_info.related_events.uid", + "finding_info.related_events.product.uid", + "finding_info.related_events.title", + "metadata.product.name", + "metadata.product.uid", + "metadata.product.vendor_name", + "remediation.desc", + "remediation.references", + "resources.cloud_partition", + "resources.region", + "resources.type", + "resources.uid", + "severity", + "status", + "comment", + "vulnerabilities.fix_coverage", + "class_name" + ] + }, + "OcsfStringFilter":{ "type":"structure", "members":{ - "LineRanges":{ - "shape":"Ranges", - "documentation":"

        Occurrences of sensitive data detected in a non-binary text file or a Microsoft Word file. Non-binary text files include files such as HTML, XML, JSON, and TXT files.

        " - }, - "OffsetRanges":{ - "shape":"Ranges", - "documentation":"

        Occurrences of sensitive data detected in a binary text file.

        " - }, - "Pages":{ - "shape":"Pages", - "documentation":"

        Occurrences of sensitive data in an Adobe Portable Document Format (PDF) file.

        " - }, - "Records":{ - "shape":"Records", - "documentation":"

        Occurrences of sensitive data in an Apache Avro object container or an Apache Parquet file.

        " + "FieldName":{ + "shape":"OcsfStringField", + "documentation":"

        The name of the field.

        " }, - "Cells":{ - "shape":"Cells", - "documentation":"

        Occurrences of sensitive data detected in Microsoft Excel workbooks, comma-separated value (CSV) files, or tab-separated value (TSV) files.

        " - } + "Filter":{"shape":"StringFilter"} }, - "documentation":"

        The detected occurrences of sensitive data.

        " + "documentation":"

        Enables filtering of security findings based on string field values in OCSF.

        " + }, + "OcsfStringFilterList":{ + "type":"list", + "member":{"shape":"OcsfStringFilter"} }, "OrganizationConfiguration":{ "type":"structure", @@ -19250,10 +21170,48 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "ProductV2":{ + "type":"structure", + "members":{ + "ProductV2Name":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the productV2.

        " + }, + "CompanyName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the organization or vendor that provides the productV2.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        Detailed information about the productV2.

        " + }, + "Categories":{ + "shape":"CategoryList", + "documentation":"

        The domains or functional areas the productV2 addresses.

        " + }, + "IntegrationV2Types":{ + "shape":"IntegrationV2TypeList", + "documentation":"

        The type of integration.

        " + }, + "MarketplaceUrl":{ + "shape":"NonEmptyString", + "documentation":"

        The console URL where you can purchase or subscribe to products.

        " + }, + "ActivationUrl":{ + "shape":"NonEmptyString", + "documentation":"

        The URL to the serviceV2 or productV2 documentation about the integration, which includes how to activate the integration.

        " + } + }, + "documentation":"

        Defines the structure for the productV2.

        " + }, "ProductsList":{ "type":"list", "member":{"shape":"Product"} }, + "ProductsV2List":{ + "type":"list", + "member":{"shape":"ProductV2"} + }, "PropagatingVgwSetDetails":{ "type":"structure", "members":{ @@ -19268,6 +21226,61 @@ "type":"list", "member":{"shape":"PropagatingVgwSetDetails"} }, + "ProviderConfiguration":{ + "type":"structure", + "members":{ + "JiraCloud":{ + "shape":"JiraCloudProviderConfiguration", + "documentation":"

        The configuration settings required to establish an integration with Jira Cloud.

        " + }, + "ServiceNow":{ + "shape":"ServiceNowProviderConfiguration", + "documentation":"

        The configuration settings required to establish an integration with ServiceNow ITSM.

        " + } + }, + "documentation":"

        The initial configuration settings required to establish an integration between Security Hub and third-party provider.

        ", + "union":true + }, + "ProviderDetail":{ + "type":"structure", + "members":{ + "JiraCloud":{ + "shape":"JiraCloudDetail", + "documentation":"

        Details about a Jira Cloud integration.

        " + }, + "ServiceNow":{ + "shape":"ServiceNowDetail", + "documentation":"

        Details about a ServiceNow ITSM integration.

        " + } + }, + "documentation":"

        The third-party provider detail for a service configuration.

        ", + "union":true + }, + "ProviderSummary":{ + "type":"structure", + "members":{ + "ProviderName":{ + "shape":"ConnectorProviderName", + "documentation":"

        The name of the provider.

        " + }, + "ConnectorStatus":{ + "shape":"ConnectorStatus", + "documentation":"

        The status for the connectorV2.

        " + } + }, + "documentation":"

        The connectorV2 third-party provider configuration summary.

        " + }, + "ProviderUpdateConfiguration":{ + "type":"structure", + "members":{ + "JiraCloud":{ + "shape":"JiraCloudUpdateConfiguration", + "documentation":"

        The parameters required to update the configuration for a Jira Cloud integration.

        " + } + }, + "documentation":"

        The parameters required to update the configuration of an integration provider.

        ", + "union":true + }, "Range":{ "type":"structure", "members":{ @@ -19431,6 +21444,24 @@ "type":"string", "pattern":"^arn:aws:securityhub:.*" }, + "ResourceCategory":{ + "type":"string", + "enum":[ + "Compute", + "Database", + "Storage", + "Code", + "AI/ML", + "Identity", + "Network", + "Other" + ] + }, + "ResourceConfig":{ + "type":"structure", + "members":{}, + "document":true + }, "ResourceConflictException":{ "type":"structure", "members":{ @@ -19776,84 +21807,413 @@ "shape":"AwsAppSyncGraphQlApiDetails", "documentation":"

        Provides details about an AppSync Graph QL API, which lets you query multiple databases, microservices, and APIs from a single GraphQL endpoint.

        " }, - "AwsEventSchemasRegistry":{ - "shape":"AwsEventSchemasRegistryDetails", - "documentation":"

        A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for schemas. They collect and organize schemas so that your schemas are in logical groups.

        " + "AwsEventSchemasRegistry":{ + "shape":"AwsEventSchemasRegistryDetails", + "documentation":"

        A schema defines the structure of events that are sent to Amazon EventBridge. Schema registries are containers for schemas. They collect and organize schemas so that your schemas are in logical groups.

        " + }, + "AwsGuardDutyDetector":{ + "shape":"AwsGuardDutyDetectorDetails", + "documentation":"

        Provides details about an Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector is required for GuardDuty to become operational.

        " + }, + "AwsStepFunctionStateMachine":{ + "shape":"AwsStepFunctionStateMachineDetails", + "documentation":"

        Provides details about an Step Functions state machine, which is a workflow consisting of a series of event-driven steps.

        " + }, + "AwsAthenaWorkGroup":{ + "shape":"AwsAthenaWorkGroupDetails", + "documentation":"

        Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, applications, or workloads. It also helps you set limits on data processing and track costs.

        " + }, + "AwsEventsEventbus":{ + "shape":"AwsEventsEventbusDetails", + "documentation":"

        Provides details about Amazon EventBridge event bus for an endpoint. An event bus is a router that receives events and delivers them to zero or more destinations, or targets.

        " + }, + "AwsDmsEndpoint":{ + "shape":"AwsDmsEndpointDetails", + "documentation":"

        Provides details about an Database Migration Service (DMS) endpoint. An endpoint provides connection, data store type, and location information about your data store.

        " + }, + "AwsEventsEndpoint":{ + "shape":"AwsEventsEndpointDetails", + "documentation":"

        Provides details about an Amazon EventBridge global endpoint. The endpoint can improve your application’s availability by making it Regional-fault tolerant.

        " + }, + "AwsDmsReplicationTask":{ + "shape":"AwsDmsReplicationTaskDetails", + "documentation":"

        Provides details about an DMS replication task. A replication task moves a set of data from the source endpoint to the target endpoint.

        " + }, + "AwsDmsReplicationInstance":{ + "shape":"AwsDmsReplicationInstanceDetails", + "documentation":"

        Provides details about an DMS replication instance. DMS uses a replication instance to connect to your source data store, read the source data, and format the data for consumption by the target data store.

        " + }, + "AwsRoute53HostedZone":{ + "shape":"AwsRoute53HostedZoneDetails", + "documentation":"

        Provides details about an Amazon Route 53 hosted zone, including the four name servers assigned to the hosted zone. A hosted zone represents a collection of records that can be managed together, belonging to a single parent domain name.

        " + }, + "AwsMskCluster":{ + "shape":"AwsMskClusterDetails", + "documentation":"

        Provides details about an Amazon Managed Streaming for Apache Kafka (Amazon MSK) cluster.

        " + }, + "AwsS3AccessPoint":{ + "shape":"AwsS3AccessPointDetails", + "documentation":"

        Provides details about an Amazon Simple Storage Service (Amazon S3) access point. S3 access points are named network endpoints that are attached to S3 buckets that you can use to perform S3 object operations.

        " + }, + "AwsEc2ClientVpnEndpoint":{ + "shape":"AwsEc2ClientVpnEndpointDetails", + "documentation":"

        Provides details about an Client VPN endpoint. A Client VPN endpoint is the resource that you create and configure to enable and manage client VPN sessions. It's the termination point for all client VPN sessions.

        " + } + }, + "documentation":"

        Additional details about a resource related to a finding.

        To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

        If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

        You also use the Other object to populate the details when the selected type does not have a corresponding object.

        " + }, + "ResourceFindingsSummary":{ + "type":"structure", + "required":[ + "FindingType", + "ProductName", + "TotalFindings" + ], + "members":{ + "FindingType":{ + "shape":"NonEmptyString", + "documentation":"

        The category or classification of the security finding.

        " + }, + "ProductName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the product associated with the security finding.

        " + }, + "TotalFindings":{ + "shape":"Integer", + "documentation":"

        The total count of security findings.

        " + }, + "Severities":{ + "shape":"ResourceSeverityBreakdown", + "documentation":"

        A breakdown of security findings by their severity levels.

        " + } + }, + "documentation":"

        A list of summaries for all finding types on a resource.

        " + }, + "ResourceFindingsSummaryList":{ + "type":"list", + "member":{"shape":"ResourceFindingsSummary"} + }, + "ResourceGroupByField":{ + "type":"string", + "enum":[ + "account_id", + "region", + "resource_category", + "resource_type", + "resource_name", + "findings_summary.finding_type" + ] + }, + "ResourceGroupByRule":{ + "type":"structure", + "required":["GroupByField"], + "members":{ + "GroupByField":{ + "shape":"ResourceGroupByField", + "documentation":"

        Specifies the attribute that resources should be grouped by.

        " + }, + "Filters":{ + "shape":"ResourcesFilters", + "documentation":"

        The criteria used to select resources and associated security findings.

        " + } + }, + "documentation":"

        Defines the configuration for organizing and categorizing Amazon Web Services resources based on associated security findings.

        " + }, + "ResourceGroupByRules":{ + "type":"list", + "member":{"shape":"ResourceGroupByRule"} + }, + "ResourceInUseException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The request was rejected because it conflicts with the resource's availability. For example, you tried to update a security control that's currently in the UPDATING state.

        ", + "error":{"httpStatusCode":400}, + "exception":true + }, + "ResourceList":{ + "type":"list", + "member":{"shape":"Resource"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The request was rejected because we can't find the specified resource.

        ", + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourceResult":{ + "type":"structure", + "required":[ + "ResourceId", + "AccountId", + "Region", + "ResourceDetailCaptureTimeDt", + "ResourceConfig" + ], + "members":{ + "ResourceArn":{ + "shape":"NonEmptyString", + "documentation":"

        Specifies the ARN that uniquely identifies a resource.

        " + }, + "ResourceId":{ + "shape":"NonEmptyString", + "documentation":"

        The unique identifier for a resource.

        " + }, + "AccountId":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services account that owns the resource.

        " + }, + "Region":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services Region where the resource is located.

        " + }, + "ResourceCategory":{ + "shape":"ResourceCategory", + "documentation":"

        The grouping where the resource belongs.

        " + }, + "ResourceType":{ + "shape":"NonEmptyString", + "documentation":"

        The type of resource.

        " + }, + "ResourceName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the resource.

        " + }, + "ResourceCreationTimeDt":{ + "shape":"NonEmptyString", + "documentation":"

        The time when the resource was created.

        " + }, + "ResourceDetailCaptureTimeDt":{ + "shape":"NonEmptyString", + "documentation":"

        The timestamp when information about the resource was captured.

        " + }, + "FindingsSummary":{ + "shape":"ResourceFindingsSummaryList", + "documentation":"

        An aggregated view of security findings associated with a resource.

        " + }, + "ResourceTags":{ + "shape":"ResourceTagList", + "documentation":"

        The key-value pairs associated with a resource.

        " + }, + "ResourceConfig":{ + "shape":"ResourceConfig", + "documentation":"

        The configuration details of a resource.

        " + } + }, + "documentation":"

        Provides comprehensive details about an Amazon Web Services resource and its associated security findings.

        " + }, + "ResourceSeverityBreakdown":{ + "type":"structure", + "members":{ + "Other":{ + "shape":"Integer", + "documentation":"

        The number of findings not in any of the severity categories.

        " + }, + "Fatal":{ + "shape":"Integer", + "documentation":"

        The number of findings with a severity level of fatal.

        " }, - "AwsGuardDutyDetector":{ - "shape":"AwsGuardDutyDetectorDetails", - "documentation":"

        Provides details about an Amazon GuardDuty detector. A detector is an object that represents the GuardDuty service. A detector is required for GuardDuty to become operational.

        " + "Critical":{ + "shape":"Integer", + "documentation":"

        The number of findings with a severity level of critical.

        " }, - "AwsStepFunctionStateMachine":{ - "shape":"AwsStepFunctionStateMachineDetails", - "documentation":"

        Provides details about an Step Functions state machine, which is a workflow consisting of a series of event-driven steps.

        " + "High":{ + "shape":"Integer", + "documentation":"

        The number of findings with a severity level of high.

        " }, - "AwsAthenaWorkGroup":{ - "shape":"AwsAthenaWorkGroupDetails", - "documentation":"

        Provides information about an Amazon Athena workgroup. A workgroup helps you separate users, teams, applications, or workloads. It also helps you set limits on data processing and track costs.

        " + "Medium":{ + "shape":"Integer", + "documentation":"

        The number of findings with a severity level of medium.

        " }, - "AwsEventsEventbus":{ - "shape":"AwsEventsEventbusDetails", - "documentation":"

        Provides details about Amazon EventBridge event bus for an endpoint. An event bus is a router that receives events and delivers them to zero or more destinations, or targets.

        " + "Low":{ + "shape":"Integer", + "documentation":"

        The number of findings with a severity level of low.

        " }, - "AwsDmsEndpoint":{ - "shape":"AwsDmsEndpointDetails", - "documentation":"

        Provides details about an Database Migration Service (DMS) endpoint. An endpoint provides connection, data store type, and location information about your data store.

        " + "Informational":{ + "shape":"Integer", + "documentation":"

        The number of findings that provide security-related information.

        " }, - "AwsEventsEndpoint":{ - "shape":"AwsEventsEndpointDetails", - "documentation":"

        Provides details about an Amazon EventBridge global endpoint. The endpoint can improve your application’s availability by making it Regional-fault tolerant.

        " + "Unknown":{ + "shape":"Integer", + "documentation":"

        The number of findings whose severity level cannot be determined.

        " + } + }, + "documentation":"

        A comprehensive distribution of security findings by severity level for Amazon Web Services resources.

        " + }, + "ResourceTag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"NonEmptyString", + "documentation":"

        The identifier or name of the tag.

        " }, - "AwsDmsReplicationTask":{ - "shape":"AwsDmsReplicationTaskDetails", - "documentation":"

        Provides details about an DMS replication task. A replication task moves a set of data from the source endpoint to the target endpoint.

        " + "Value":{ + "shape":"NonEmptyString", + "documentation":"

        The data associated with the tag key.

        " + } + }, + "documentation":"

        Represents tag information associated with Amazon Web Services resources.

        " + }, + "ResourceTagList":{ + "type":"list", + "member":{"shape":"ResourceTag"} + }, + "Resources":{ + "type":"list", + "member":{"shape":"ResourceResult"} + }, + "ResourcesCompositeFilter":{ + "type":"structure", + "members":{ + "StringFilters":{ + "shape":"ResourcesStringFilterList", + "documentation":"

        Enables filtering based on string field values.

        " }, - "AwsDmsReplicationInstance":{ - "shape":"AwsDmsReplicationInstanceDetails", - "documentation":"

        Provides details about an DMS replication instance. DMS uses a replication instance to connect to your source data store, read the source data, and format the data for consumption by the target data store.

        " + "DateFilters":{ + "shape":"ResourcesDateFilterList", + "documentation":"

        Enables filtering based on date and timestamp field values.

        " }, - "AwsRoute53HostedZone":{ - "shape":"AwsRoute53HostedZoneDetails", - "documentation":"

        Provides details about an Amazon Route 53 hosted zone, including the four name servers assigned to the hosted zone. A hosted zone represents a collection of records that can be managed together, belonging to a single parent domain name.

        " + "NumberFilters":{ + "shape":"ResourcesNumberFilterList", + "documentation":"

        Enables filtering based on numerical field values.

        " }, - "AwsMskCluster":{ - "shape":"AwsMskClusterDetails", - "documentation":"

        Provides details about an Amazon Managed Streaming for Apache Kafka (Amazon MSK) cluster.

        " + "MapFilters":{ + "shape":"ResourcesMapFilterList", + "documentation":"

        Enables filtering based on map-based field values.

        " }, - "AwsS3AccessPoint":{ - "shape":"AwsS3AccessPointDetails", - "documentation":"

        Provides details about an Amazon Simple Storage Service (Amazon S3) access point. S3 access points are named network endpoints that are attached to S3 buckets that you can use to perform S3 object operations.

        " + "Operator":{ + "shape":"AllowedOperators", + "documentation":"

        The logical operator used to combine multiple filter conditions.

        " + } + }, + "documentation":"

        Enables the creation of criteria for Amazon Web Services resources in Security Hub.

        " + }, + "ResourcesCompositeFilterList":{ + "type":"list", + "member":{"shape":"ResourcesCompositeFilter"} + }, + "ResourcesDateField":{ + "type":"string", + "enum":[ + "resource_detail_capture_time_dt", + "resource_creation_time_dt" + ] + }, + "ResourcesDateFilter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"ResourcesDateField", + "documentation":"

        The name of the field.

        " }, - "AwsEc2ClientVpnEndpoint":{ - "shape":"AwsEc2ClientVpnEndpointDetails", - "documentation":"

        Provides details about an Client VPN endpoint. A Client VPN endpoint is the resource that you create and configure to enable and manage client VPN sessions. It's the termination point for all client VPN sessions.

        " + "Filter":{"shape":"DateFilter"} + }, + "documentation":"

        Enables the filtering of Amazon Web Services resources based on date and timestamp attributes.

        " + }, + "ResourcesDateFilterList":{ + "type":"list", + "member":{"shape":"ResourcesDateFilter"} + }, + "ResourcesFilters":{ + "type":"structure", + "members":{ + "CompositeFilters":{ + "shape":"ResourcesCompositeFilterList", + "documentation":"

        A collection of complex filtering conditions that can be applied to Amazon Web Services resources.

        " + }, + "CompositeOperator":{ + "shape":"AllowedOperators", + "documentation":"

        The logical operator used to combine multiple filter conditions in the structure.

        " } }, - "documentation":"

        Additional details about a resource related to a finding.

        To provide the details, use the object that corresponds to the resource type. For example, if the resource type is AwsEc2Instance, then you use the AwsEc2Instance object to provide the details.

        If the type-specific object does not contain all of the fields you want to populate, then you use the Other object to populate those additional fields.

        You also use the Other object to populate the details when the selected type does not have a corresponding object.

        " + "documentation":"

        Enables filtering of Amazon Web Services resources based on data.

        " }, - "ResourceInUseException":{ + "ResourcesMapField":{ + "type":"string", + "enum":["tags"] + }, + "ResourcesMapFilter":{ "type":"structure", "members":{ - "Message":{"shape":"NonEmptyString"}, - "Code":{"shape":"NonEmptyString"} + "FieldName":{ + "shape":"ResourcesMapField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"MapFilter"} }, - "documentation":"

        The request was rejected because it conflicts with the resource's availability. For example, you tried to update a security control that's currently in the UPDATING state.

        ", - "error":{"httpStatusCode":400}, - "exception":true + "documentation":"

        Enables filtering of Amazon Web Services resources based on key-value map attributes.

        " }, - "ResourceList":{ + "ResourcesMapFilterList":{ "type":"list", - "member":{"shape":"Resource"} + "member":{"shape":"ResourcesMapFilter"} }, - "ResourceNotFoundException":{ + "ResourcesNumberField":{ + "type":"string", + "enum":[ + "findings_summary.total_findings", + "findings_summary.severities.other", + "findings_summary.severities.fatal", + "findings_summary.severities.critical", + "findings_summary.severities.high", + "findings_summary.severities.medium", + "findings_summary.severities.low", + "findings_summary.severities.informational", + "findings_summary.severities.unknown" + ] + }, + "ResourcesNumberFilter":{ "type":"structure", "members":{ - "Message":{"shape":"NonEmptyString"}, - "Code":{"shape":"NonEmptyString"} + "FieldName":{ + "shape":"ResourcesNumberField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"NumberFilter"} }, - "documentation":"

        The request was rejected because we can't find the specified resource.

        ", - "error":{"httpStatusCode":404}, - "exception":true + "documentation":"

        Enables filtering of Amazon Web Services resources based on numerical values.

        " + }, + "ResourcesNumberFilterList":{ + "type":"list", + "member":{"shape":"ResourcesNumberFilter"} + }, + "ResourcesStringField":{ + "type":"string", + "enum":[ + "resource_arn", + "resource_id", + "account_id", + "region", + "resource_category", + "resource_type", + "resource_name", + "findings_summary.finding_type", + "findings_summary.product_name" + ] + }, + "ResourcesStringFilter":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"ResourcesStringField", + "documentation":"

        The name of the field.

        " + }, + "Filter":{"shape":"StringFilter"} + }, + "documentation":"

        Enables filtering of Amazon Web Services resources based on string field values.

        " + }, + "ResourcesStringFilterList":{ + "type":"list", + "member":{"shape":"ResourcesStringFilter"} }, "Result":{ "type":"structure", @@ -20294,6 +22654,11 @@ "max":1000, "min":1 }, + "RuleOrderValueV2":{ + "type":"float", + "max":1000.0, + "min":1.0 + }, "RuleStatus":{ "type":"string", "enum":[ @@ -20301,6 +22666,13 @@ "DISABLED" ] }, + "RuleStatusV2":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "SecurityControl":{ "type":"structure", "required":[ @@ -20472,6 +22844,13 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "SecurityHubFeature":{ + "type":"string", + "enum":[ + "SecurityHub", + "SecurityHubV2" + ] + }, "SecurityHubPolicy":{ "type":"structure", "members":{ @@ -20534,6 +22913,11 @@ "type":"list", "member":{"shape":"SensitiveDataResult"} }, + "SensitiveNonEmptyString":{ + "type":"string", + "pattern":".*\\S.*", + "sensitive":true + }, "Sequence":{ "type":"structure", "members":{ @@ -20560,6 +22944,48 @@ }, "documentation":"

        Contains information about an Amazon GuardDuty Extended Threat Detection attack sequence finding. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

        " }, + "ServiceNowDetail":{ + "type":"structure", + "required":["AuthStatus"], + "members":{ + "InstanceName":{ + "shape":"NonEmptyString", + "documentation":"

        The instanceName of ServiceNow ITSM.

        " + }, + "ClientId":{ + "shape":"NonEmptyString", + "documentation":"

        The clientId of ServiceNow ITSM.

        " + }, + "AuthStatus":{ + "shape":"ConnectorAuthStatus", + "documentation":"

        The status of the authorization between ServiceNow ITSM and the service.

        " + } + }, + "documentation":"

        Information about a ServiceNow ITSM integration.

        " + }, + "ServiceNowProviderConfiguration":{ + "type":"structure", + "required":[ + "InstanceName", + "ClientId", + "ClientSecret" + ], + "members":{ + "InstanceName":{ + "shape":"NonEmptyString", + "documentation":"

        The instance name of ServiceNow ITSM.

        " + }, + "ClientId":{ + "shape":"NonEmptyString", + "documentation":"

        The client ID of ServiceNow ITSM.

        " + }, + "ClientSecret":{ + "shape":"SensitiveNonEmptyString", + "documentation":"

        The client secret of ServiceNow ITSM.

        " + } + }, + "documentation":"

        The initial configuration settings required to establish an integration between Security Hub and ServiceNow ITSM.

        " + }, "Severity":{ "type":"structure", "members":{ @@ -21196,8 +23622,7 @@ }, "StartConfigurationPolicyDisassociationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "StatelessCustomActionDefinition":{ "type":"structure", @@ -21287,7 +23712,7 @@ }, "Comparison":{ "shape":"StringFilterComparison", - "documentation":"

        The condition to apply to a string value when filtering Security Hub findings.

        To search for values that have the filter value, use one of the following comparison operators:

        • To search for values that include the filter value, use CONTAINS. For example, the filter Title CONTAINS CloudFront matches findings that have a Title that includes the string CloudFront.

        • To search for values that exactly match the filter value, use EQUALS. For example, the filter AwsAccountId EQUALS 123456789012 only matches findings that have an account ID of 123456789012.

        • To search for values that start with the filter value, use PREFIX. For example, the filter ResourceRegion PREFIX us matches findings that have a ResourceRegion that starts with us. A ResourceRegion that starts with a different value, such as af, ap, or ca, doesn't match.

        CONTAINS, EQUALS, and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch match a finding that includes either CloudFront, CloudWatch, or both strings in the title.

        To search for values that don’t have the filter value, use one of the following comparison operators:

        • To search for values that exclude the filter value, use NOT_CONTAINS. For example, the filter Title NOT_CONTAINS CloudFront matches findings that have a Title that excludes the string CloudFront.

        • To search for values other than the filter value, use NOT_EQUALS. For example, the filter AwsAccountId NOT_EQUALS 123456789012 only matches findings that have an account ID other than 123456789012.

        • To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS. For example, the filter ResourceRegion PREFIX_NOT_EQUALS us matches findings with a ResourceRegion that starts with a value other than us.

        NOT_CONTAINS, NOT_EQUALS, and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch match a finding that excludes both CloudFront and CloudWatch in the title.

        You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can't provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter on the same field. Combining filters in this way returns an error. CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.

        You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, and then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.

        For example, for the following filters, Security Hub first identifies findings that have resource types that start with either AwsIam or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.

        • ResourceType PREFIX AwsIam

        • ResourceType PREFIX AwsEc2

        • ResourceType NOT_EQUALS AwsIamPolicy

        • ResourceType NOT_EQUALS AwsEc2NetworkInterface

        CONTAINS and NOT_CONTAINS operators can be used only with automation rules. For more information, see Automation rules in the Security Hub User Guide.

        " + "documentation":"

        The condition to apply to a string value when filtering Security Hub findings.

        To search for values that have the filter value, use one of the following comparison operators:

        • To search for values that include the filter value, use CONTAINS. For example, the filter Title CONTAINS CloudFront matches findings that have a Title that includes the string CloudFront.

        • To search for values that exactly match the filter value, use EQUALS. For example, the filter AwsAccountId EQUALS 123456789012 only matches findings that have an account ID of 123456789012.

        • To search for values that start with the filter value, use PREFIX. For example, the filter ResourceRegion PREFIX us matches findings that have a ResourceRegion that starts with us. A ResourceRegion that starts with a different value, such as af, ap, or ca, doesn't match.

        CONTAINS, EQUALS, and PREFIX filters on the same field are joined by OR. A finding matches if it matches any one of those filters. For example, the filters Title CONTAINS CloudFront OR Title CONTAINS CloudWatch match a finding that includes either CloudFront, CloudWatch, or both strings in the title.

        To search for values that don’t have the filter value, use one of the following comparison operators:

        • To search for values that exclude the filter value, use NOT_CONTAINS. For example, the filter Title NOT_CONTAINS CloudFront matches findings that have a Title that excludes the string CloudFront.

        • To search for values other than the filter value, use NOT_EQUALS. For example, the filter AwsAccountId NOT_EQUALS 123456789012 only matches findings that have an account ID other than 123456789012.

        • To search for values that don't start with the filter value, use PREFIX_NOT_EQUALS. For example, the filter ResourceRegion PREFIX_NOT_EQUALS us matches findings with a ResourceRegion that starts with a value other than us.

        NOT_CONTAINS, NOT_EQUALS, and PREFIX_NOT_EQUALS filters on the same field are joined by AND. A finding matches only if it matches all of those filters. For example, the filters Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch match a finding that excludes both CloudFront and CloudWatch in the title.

        You can’t have both a CONTAINS filter and a NOT_CONTAINS filter on the same field. Similarly, you can't provide both an EQUALS filter and a NOT_EQUALS or PREFIX_NOT_EQUALS filter on the same field. Combining filters in this way returns an error. CONTAINS filters can only be used with other CONTAINS filters. NOT_CONTAINS filters can only be used with other NOT_CONTAINS filters.

        You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters for the same field. Security Hub first processes the PREFIX filters, and then the NOT_EQUALS or PREFIX_NOT_EQUALS filters.

        For example, for the following filters, Security Hub first identifies findings that have resource types that start with either AwsIam or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy and findings that have a resource type of AwsEc2NetworkInterface.

        • ResourceType PREFIX AwsIam

        • ResourceType PREFIX AwsEc2

        • ResourceType NOT_EQUALS AwsIamPolicy

        • ResourceType NOT_EQUALS AwsEc2NetworkInterface

        CONTAINS and NOT_CONTAINS operators can be used only with automation rules V1. CONTAINS_WORD operator is only supported in GetFindingsV2, GetFindingStatisticsV2, GetResourcesV2, and GetResourceStatisticsV2 APIs. For more information, see Automation rules in the Security Hub User Guide.

        " } }, "documentation":"

        A string filter for filtering Security Hub findings.

        " @@ -21300,7 +23725,8 @@ "NOT_EQUALS", "PREFIX_NOT_EQUALS", "CONTAINS", - "NOT_CONTAINS" + "NOT_CONTAINS", + "CONTAINS_WORD" ] }, "StringFilterList":{ @@ -21373,8 +23799,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -21494,6 +23919,16 @@ "type":"list", "member":{"shape":"Threat"} }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The limit on the number of requests per second was exceeded.

        ", + "error":{"httpStatusCode":429}, + "exception":true + }, "Timestamp":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -21656,8 +24091,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateActionTargetRequest":{ "type":"structure", @@ -21681,9 +24115,92 @@ }, "UpdateActionTargetResponse":{ "type":"structure", + "members":{} + }, + "UpdateAggregatorV2Request":{ + "type":"structure", + "required":[ + "AggregatorV2Arn", + "RegionLinkingMode" + ], + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the Aggregator V2.

        ", + "location":"uri", + "locationName":"AggregatorV2Arn" + }, + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Determines how Amazon Web Services Regions should be linked to the Aggregator V2.

        " + }, + "LinkedRegions":{ + "shape":"StringList", + "documentation":"

        A list of Amazon Web Services Regions linked to the aggregation Region.

        " + } + } + }, + "UpdateAggregatorV2Response":{ + "type":"structure", + "members":{ + "AggregatorV2Arn":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the Aggregator V2.

        " + }, + "AggregationRegion":{ + "shape":"NonEmptyString", + "documentation":"

        The Amazon Web Services Region where data is aggregated.

        " + }, + "RegionLinkingMode":{ + "shape":"NonEmptyString", + "documentation":"

        Determines how Amazon Web Services Regions should be linked to the Aggregator V2.

        " + }, + "LinkedRegions":{ + "shape":"StringList", + "documentation":"

        A list of Amazon Web Services Regions linked to the aggregation Region.

        " + } + } + }, + "UpdateAutomationRuleV2Request":{ + "type":"structure", + "required":["Identifier"], "members":{ + "Identifier":{ + "shape":"NonEmptyString", + "documentation":"

        The ARN of the automation rule.

        ", + "location":"uri", + "locationName":"Identifier" + }, + "RuleStatus":{ + "shape":"RuleStatusV2", + "documentation":"

        The status of the automation rule.

        " + }, + "RuleOrder":{ + "shape":"RuleOrderValueV2", + "documentation":"

        Represents a value for the rule priority.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        A description of the automation rule.

        " + }, + "RuleName":{ + "shape":"NonEmptyString", + "documentation":"

        The name of the automation rule.

        " + }, + "Criteria":{ + "shape":"Criteria", + "documentation":"

        The filtering type and configuration of the automation rule.

        " + }, + "Actions":{ + "shape":"AutomationRulesActionListV2", + "documentation":"

        A list of actions to be performed when the rule criteria is met.

        " + } } }, + "UpdateAutomationRuleV2Response":{ + "type":"structure", + "members":{} + }, "UpdateAutomationRulesRequestItem":{ "type":"structure", "required":["RuleArn"], @@ -21790,6 +24307,34 @@ } } }, + "UpdateConnectorV2Request":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

        The UUID of the connectorV2 to identify the connectorV2 resource.

        ", + "location":"uri", + "locationName":"ConnectorId" + }, + "ClientSecret":{ + "shape":"SensitiveNonEmptyString", + "documentation":"

        The clientSecret of ServiceNow.

        " + }, + "Description":{ + "shape":"NonEmptyString", + "documentation":"

        The description of the connectorV2.

        " + }, + "Provider":{ + "shape":"ProviderUpdateConfiguration", + "documentation":"

        The third-party provider’s service configuration.

        " + } + } + }, + "UpdateConnectorV2Response":{ + "type":"structure", + "members":{} + }, "UpdateFindingAggregatorRequest":{ "type":"structure", "required":[ @@ -21852,8 +24397,7 @@ }, "UpdateFindingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateInsightRequest":{ "type":"structure", @@ -21881,8 +24425,7 @@ }, "UpdateInsightResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateOrganizationConfigurationRequest":{ "type":"structure", @@ -21901,8 +24444,7 @@ }, "UpdateOrganizationConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateSecurityControlRequest":{ "type":"structure", @@ -21927,8 +24469,7 @@ }, "UpdateSecurityControlResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateSecurityHubConfigurationRequest":{ "type":"structure", @@ -21945,8 +24486,7 @@ }, "UpdateSecurityHubConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateStandardsControlRequest":{ "type":"structure", @@ -21970,8 +24510,7 @@ }, "UpdateStandardsControlResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateStatus":{ "type":"string", @@ -21994,6 +24533,16 @@ }, "documentation":"

        Provides Amazon Web Services account information of the user involved in an Amazon GuardDuty Extended Threat Detection attack sequence. GuardDuty generates an attack sequence finding when multiple events align to a potentially suspicious activity. To receive GuardDuty attack sequence findings in Security Hub, you must have GuardDuty enabled. For more information, see GuardDuty Extended Threat Detection in the Amazon GuardDuty User Guide.

        " }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

        The request has failed validation because it's missing required fields or has invalid inputs.

        ", + "error":{"httpStatusCode":400}, + "exception":true + }, "VerificationState":{ "type":"string", "enum":[ diff --git a/services/securityir/pom.xml b/services/securityir/pom.xml index d252b7a6d199..b3071fd50063 100644 --- a/services/securityir/pom.xml +++ b/services/securityir/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT securityir AWS Java SDK :: Services :: Security IR diff --git a/services/securityir/src/main/resources/codegen-resources/customization.config b/services/securityir/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/securityir/src/main/resources/codegen-resources/customization.config +++ b/services/securityir/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/securityir/src/main/resources/codegen-resources/service-2.json b/services/securityir/src/main/resources/codegen-resources/service-2.json index a41dafc46637..1ad944453946 100644 --- a/services/securityir/src/main/resources/codegen-resources/service-2.json +++ b/services/securityir/src/main/resources/codegen-resources/service-2.json @@ -28,9 +28,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to view an existing membership.

        " @@ -50,9 +50,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permissions to cancel an existing membership.

        ", @@ -73,9 +73,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to close an existing case.

        " @@ -95,9 +95,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to create a new case.

        ", @@ -118,9 +118,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to add a comment to an existing case.

        ", @@ -141,9 +141,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permissions to create a new membership.

        ", @@ -164,9 +164,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grant permission to view a designated case.

        " @@ -186,9 +186,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to obtain an Amazon S3 presigned URL to download an attachment.

        " @@ -208,9 +208,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to upload an attachment to a case.

        ", @@ -231,9 +231,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to get details of a designated service membership.

        " @@ -253,9 +253,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permissions to view the audit log for edits made to a designated case.

        " @@ -275,9 +275,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to list all cases the requester has access to.

        " @@ -297,9 +297,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permissions to list and view comments for a designated case.

        " @@ -319,9 +319,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to query the memberships a principal has access to.

        " @@ -342,8 +342,8 @@ {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to view currently configured tags on a resource.

        " @@ -364,8 +364,8 @@ {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to add a tag(s) to a designated resource.

        " @@ -386,8 +386,8 @@ {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"}, {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to remove a tag(s) from a designated resource.

        ", @@ -408,9 +408,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to update an existing case.

        " @@ -430,9 +430,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to update an existing case comment.

        ", @@ -453,9 +453,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to update the status for a designated case. Options include Submitted | Detection and Analysis | Eradication, Containment and Recovery | Post-Incident Activities | Closed.

        " @@ -475,9 +475,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants access to UpdateMembership to change membership configuration.

        ", @@ -498,9 +498,9 @@ {"shape":"ValidationException"}, {"shape":"SecurityIncidentResponseNotActiveException"}, {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"}, {"shape":"InvalidTokenException"} ], "documentation":"

        Grants permission to update the resolver type for a case.

        This is a one-way action and cannot be reversed.

        Options include self-supported > AWS-supported.

        " @@ -557,6 +557,7 @@ "ap-southeast-3", "ap-southeast-4", "ap-southeast-5", + "ap-southeast-7", "ca-central-1", "ca-west-1", "cn-north-1", @@ -572,6 +573,7 @@ "il-central-1", "me-central-1", "me-south-1", + "mx-central-1", "sa-east-1", "us-east-1", "us-east-2", @@ -1824,7 +1826,7 @@ }, "PrincipalId":{ "type":"string", - "pattern":".*(^internal:midway:([a-z]{3,8}|svc-mw-[0-9]{12}[a-zA-Z0-9-]{5,20})$)|(^external:aws:\\d{12}$).*" + "pattern":".*((^AWS Responder)|(^\\d{12}$)|(^security-ir.amazonaws.com)).*" }, "ResolverType":{ "type":"string", diff --git a/services/securitylake/pom.xml b/services/securitylake/pom.xml index 57a6ed943d7d..2141a1ae0317 100644 --- a/services/securitylake/pom.xml +++ b/services/securitylake/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT securitylake AWS Java SDK :: Services :: Security Lake diff --git a/services/securitylake/src/main/resources/codegen-resources/customization.config b/services/securitylake/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/securitylake/src/main/resources/codegen-resources/customization.config +++ b/services/securitylake/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/serverlessapplicationrepository/pom.xml b/services/serverlessapplicationrepository/pom.xml index ab41714525f9..1219f6b8f7b3 100644 --- a/services/serverlessapplicationrepository/pom.xml +++ b/services/serverlessapplicationrepository/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 serverlessapplicationrepository diff --git a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config 
b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config index 3214ab7858eb..f75675b7698b 100644 --- a/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config +++ b/services/serverlessapplicationrepository/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "verifiedSimpleMethods": [ "listApplications" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/servicecatalog/pom.xml b/services/servicecatalog/pom.xml index 1d07ca2ffd5b..b23164cf5bf3 100644 --- a/services/servicecatalog/pom.xml +++ b/services/servicecatalog/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT servicecatalog AWS Java SDK :: Services :: AWS Service Catalog diff --git a/services/servicecatalog/src/main/resources/codegen-resources/customization.config b/services/servicecatalog/src/main/resources/codegen-resources/customization.config index 87cdcaa03bf7..7f77782fc298 100644 --- a/services/servicecatalog/src/main/resources/codegen-resources/customization.config +++ b/services/servicecatalog/src/main/resources/codegen-resources/customization.config @@ -12,6 +12,5 @@ "searchProvisionedProducts", "getAWSOrganizationsAccessStatus" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/servicecatalogappregistry/pom.xml b/services/servicecatalogappregistry/pom.xml index ebcc8ae80019..bd5f5e1d3429 100644 --- a/services/servicecatalogappregistry/pom.xml +++ b/services/servicecatalogappregistry/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT servicecatalogappregistry AWS Java SDK :: Services :: Service Catalog App Registry diff --git 
a/services/servicecatalogappregistry/src/main/resources/codegen-resources/customization.config b/services/servicecatalogappregistry/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/servicecatalogappregistry/src/main/resources/codegen-resources/customization.config +++ b/services/servicecatalogappregistry/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/servicediscovery/pom.xml b/services/servicediscovery/pom.xml index b75b5b6281d4..3b33f958d310 100644 --- a/services/servicediscovery/pom.xml +++ b/services/servicediscovery/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 servicediscovery diff --git a/services/servicediscovery/src/main/resources/codegen-resources/customization.config b/services/servicediscovery/src/main/resources/codegen-resources/customization.config index b2c13ed8b65f..e62d340b16a3 100644 --- a/services/servicediscovery/src/main/resources/codegen-resources/customization.config +++ b/services/servicediscovery/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "listServices" ], "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/servicequotas/pom.xml b/services/servicequotas/pom.xml index 707db867b952..9021f7377639 100644 --- a/services/servicequotas/pom.xml +++ b/services/servicequotas/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT servicequotas AWS Java SDK :: Services :: Service Quotas diff --git a/services/servicequotas/src/main/resources/codegen-resources/customization.config 
b/services/servicequotas/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/customization.config +++ b/services/servicequotas/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/servicequotas/src/main/resources/codegen-resources/service-2.json b/services/servicequotas/src/main/resources/codegen-resources/service-2.json index 61990f4b02f9..63ac6f24105e 100644 --- a/services/servicequotas/src/main/resources/codegen-resources/service-2.json +++ b/services/servicequotas/src/main/resources/codegen-resources/service-2.json @@ -34,6 +34,26 @@ ], "documentation":"

        Associates your quota request template with your organization. When a new Amazon Web Services account is created in your organization, the quota increase requests in the template are automatically applied to the account. You can add a quota increase request for any adjustable quota to your template.

        " }, + "CreateSupportCase":{ + "name":"CreateSupportCase", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateSupportCaseRequest"}, + "output":{"shape":"CreateSupportCaseResponse"}, + "errors":[ + {"shape":"DependencyAccessDeniedException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"AccessDeniedException"}, + {"shape":"NoSuchResourceException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"ServiceException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

        Creates a Support case for an existing quota increase request. This call only creates a Support case if the request has a Pending status.

        " + }, "DeleteServiceQuotaIncreaseRequestFromTemplate":{ "name":"DeleteServiceQuotaIncreaseRequestFromTemplate", "http":{ @@ -406,13 +426,11 @@ }, "AssociateServiceQuotaTemplateRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "AssociateServiceQuotaTemplateResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AwsRegion":{ "type":"string", @@ -420,6 +438,20 @@ "min":1, "pattern":"[a-zA-Z][a-zA-Z0-9-]{1,128}" }, + "CreateSupportCaseRequest":{ + "type":"structure", + "required":["RequestId"], + "members":{ + "RequestId":{ + "shape":"RequestId", + "documentation":"

        The ID of the pending quota increase request for which you want to open a Support case.

        " + } + } + }, + "CreateSupportCaseResponse":{ + "type":"structure", + "members":{} + }, "CustomerServiceEngagementId":{"type":"string"}, "DateTime":{"type":"timestamp"}, "DeleteServiceQuotaIncreaseRequestFromTemplateRequest":{ @@ -446,8 +478,7 @@ }, "DeleteServiceQuotaIncreaseRequestFromTemplateResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DependencyAccessDeniedException":{ "type":"structure", @@ -459,13 +490,11 @@ }, "DisassociateServiceQuotaTemplateRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateServiceQuotaTemplateResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ErrorCode":{ "type":"string", @@ -520,8 +549,7 @@ }, "GetAssociationForServiceQuotaTemplateRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAssociationForServiceQuotaTemplateResponse":{ "type":"structure", @@ -1428,8 +1456,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -1480,8 +1507,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} } }, "documentation":"

        With Service Quotas, you can view and manage your quotas easily as your Amazon Web Services workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your Amazon Web Services account. For more information, see the Service Quotas User Guide.

        You need Amazon Web Services CLI version 2.13.20 or higher to view and manage resource-level quotas such as Instances per domain for Amazon OpenSearch Service.

        " diff --git a/services/ses/pom.xml b/services/ses/pom.xml index 1cb0514b4366..56c749ad60a9 100644 --- a/services/ses/pom.xml +++ b/services/ses/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ses AWS Java SDK :: Services :: Amazon SES diff --git a/services/sesv2/pom.xml b/services/sesv2/pom.xml index 1e1da8a68f3c..92f3b6a74ffd 100644 --- a/services/sesv2/pom.xml +++ b/services/sesv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sesv2 AWS Java SDK :: Services :: SESv2 diff --git a/services/sesv2/src/main/resources/codegen-resources/customization.config b/services/sesv2/src/main/resources/codegen-resources/customization.config index fe4c05aef8db..3388694e6427 100644 --- a/services/sesv2/src/main/resources/codegen-resources/customization.config +++ b/services/sesv2/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "enableGenerateCompiledEndpointRules": true, - "enableEndpointAuthSchemeParams": true, - "enableFastUnmarshaller": true + "enableEndpointAuthSchemeParams": true } diff --git a/services/sesv2/src/main/resources/codegen-resources/service-2.json b/services/sesv2/src/main/resources/codegen-resources/service-2.json index e626429257dc..e0b9ef6f1b21 100644 --- a/services/sesv2/src/main/resources/codegen-resources/service-2.json +++ b/services/sesv2/src/main/resources/codegen-resources/service-2.json @@ -1543,8 +1543,7 @@ }, "AccountSuspendedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The message can't be sent because the account's ability to send email has been permanently restricted.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -1566,8 +1565,7 @@ "AdminEmail":{"type":"string"}, "AlreadyExistsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource specified in your request already exists.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -1667,8 +1665,7 @@ "AttributesData":{"type":"string"}, "BadRequestException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The input you provided is invalid.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -1924,8 +1921,7 @@ }, "CancelExportJobResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "CaseId":{"type":"string"}, @@ -1986,8 +1982,7 @@ "ComplaintSubType":{"type":"string"}, "ConcurrentModificationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource is being modified by another operation or thread.

        ", "error":{"httpStatusCode":500}, "exception":true, @@ -2003,8 +1998,7 @@ }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If there is already an ongoing account details update under review.

        ", "error":{"httpStatusCode":409}, "exception":true @@ -2125,8 +2119,7 @@ }, "CreateConfigurationSetEventDestinationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "CreateConfigurationSetRequest":{ @@ -2171,8 +2164,7 @@ }, "CreateConfigurationSetResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "CreateContactListRequest":{ @@ -2199,8 +2191,7 @@ }, "CreateContactListResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateContactRequest":{ "type":"structure", @@ -2235,8 +2226,7 @@ }, "CreateContactResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateCustomVerificationEmailTemplateRequest":{ "type":"structure", @@ -2278,8 +2268,7 @@ }, "CreateCustomVerificationEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "CreateDedicatedIpPoolRequest":{ @@ -2303,8 +2292,7 @@ }, "CreateDedicatedIpPoolResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "CreateDeliverabilityTestReportRequest":{ @@ -2380,8 +2368,7 @@ }, "CreateEmailIdentityPolicyResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "CreateEmailIdentityRequest":{ @@ -2445,8 +2432,7 @@ }, "CreateEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "CreateExportJobRequest":{ @@ -2704,8 +2690,7 @@ }, "DeleteConfigurationSetEventDestinationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeleteConfigurationSetRequest":{ @@ -2723,8 +2708,7 @@ }, "DeleteConfigurationSetResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeleteContactListRequest":{ @@ -2741,8 +2725,7 @@ }, "DeleteContactListResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteContactRequest":{ "type":"structure", @@ -2767,8 +2750,7 @@ }, "DeleteContactResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteCustomVerificationEmailTemplateRequest":{ "type":"structure", @@ -2785,8 +2767,7 @@ }, "DeleteCustomVerificationEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "DeleteDedicatedIpPoolRequest":{ @@ -2804,8 +2785,7 @@ }, "DeleteDedicatedIpPoolResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeleteEmailIdentityPolicyRequest":{ @@ -2832,8 +2812,7 @@ }, "DeleteEmailIdentityPolicyResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeleteEmailIdentityRequest":{ @@ -2851,8 +2830,7 @@ }, "DeleteEmailIdentityResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeleteEmailTemplateRequest":{ @@ -2870,8 +2848,7 @@ }, "DeleteEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "DeleteMultiRegionEndpointRequest":{ @@ -2912,8 +2889,7 @@ }, "DeleteSuppressedDestinationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "DeliverabilityDashboardAccountStatus":{ @@ -3070,7 +3046,7 @@ }, "SigningAttributesOrigin":{ "shape":"DkimSigningAttributesOrigin", - "documentation":"

        A string that indicates how DKIM was configured for the identity. These are the possible values:

        • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

        • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

        • AWS_SES_AF_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_NORTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_IL_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_SA_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_CA_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).

        " + "documentation":"

        A string that indicates how DKIM was configured for the identity. These are the possible values:

        • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

        • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

        • AWS_SES_AF_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_NORTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Hyderabad) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Middle East (UAE) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_SOUTH_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_IL_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_SA_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_CA_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_3 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in Europe (Zurich) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_1 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_2 – Indicates that DKIM was configured for the identity by replicating signing attributes from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).

        " }, "NextSigningKeyLength":{ "shape":"DkimSigningKeyLength", @@ -3104,7 +3080,7 @@ }, "DomainSigningAttributesOrigin":{ "shape":"DkimSigningAttributesOrigin", - "documentation":"

        The attribute to use for configuring DKIM for the identity depends on the operation:

        1. For PutEmailIdentityDkimSigningAttributes:

        2. For CreateEmailIdentity when replicating a parent identity's DKIM configuration:

          • Allowed values: All values except AWS_SES and EXTERNAL

        • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

        • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

        • AWS_SES_AF_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_NORTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_3 – Configure DKIM for the identity by replicating from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_IL_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_SA_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_CA_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_2 – Configure DKIM for the identity by replicating from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).

        " + "documentation":"

        The attribute to use for configuring DKIM for the identity depends on the operation:

        1. For PutEmailIdentityDkimSigningAttributes:

        2. For CreateEmailIdentity when replicating a parent identity's DKIM configuration:

          • Allowed values: All values except AWS_SES and EXTERNAL

        • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

        • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

        • AWS_SES_AF_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Africa (Cape Town) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_NORTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Stockholm) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Mumbai) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTH_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Hyderabad) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_3 – Configure DKIM for the identity by replicating from a parent identity in Europe (Paris) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in Europe (London) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Milan) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Ireland) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Osaka) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Seoul) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Middle East (UAE) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_ME_SOUTH_1 – Configure DKIM for the identity by replicating from a parent identity in Middle East (Bahrain) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_NORTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Tokyo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_IL_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Israel (Tel Aviv) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_SA_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in South America (São Paulo) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_CA_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Canada (Central) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_1 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Singapore) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_2 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Sydney) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_AP_SOUTHEAST_3 – Configure DKIM for the identity by replicating from a parent identity in Asia Pacific (Jakarta) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_1 – Configure DKIM for the identity by replicating from a parent identity in Europe (Frankfurt) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_EU_CENTRAL_2 – Configure DKIM for the identity by replicating from a parent identity in Europe (Zurich) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_1 – Configure DKIM for the identity by replicating from a parent identity in US East (N. Virginia) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_EAST_2 – Configure DKIM for the identity by replicating from a parent identity in US East (Ohio) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_1 – Configure DKIM for the identity by replicating from a parent identity in US West (N. California) region using Deterministic Easy-DKIM (DEED).

        • AWS_SES_US_WEST_2 – Configure DKIM for the identity by replicating from a parent identity in US West (Oregon) region using Deterministic Easy-DKIM (DEED).

        " } }, "documentation":"

        An object that contains configuration for Bring Your Own DKIM (BYODKIM), or, for Easy DKIM

        " @@ -3135,7 +3111,10 @@ "AWS_SES_US_EAST_1", "AWS_SES_US_EAST_2", "AWS_SES_US_WEST_1", - "AWS_SES_US_WEST_2" + "AWS_SES_US_WEST_2", + "AWS_SES_ME_CENTRAL_1", + "AWS_SES_AP_SOUTH_2", + "AWS_SES_EU_CENTRAL_2" ] }, "DkimSigningKeyLength":{ @@ -3391,7 +3370,7 @@ }, "EmailTemplateName":{ "type":"string", - "documentation":"

        The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

        ", + "documentation":"

        The name of the template. You will refer to this name when you send email using the SendEmail or SendBulkEmail operations.

        ", "min":1 }, "EmailTemplateSubject":{ @@ -3457,7 +3436,7 @@ }, "MatchingEventTypes":{ "shape":"EventTypes", - "documentation":"

        The types of events that Amazon SES sends to the specified event destinations.

        • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

        • REJECT - SES accepted the email, but determined that it contained a virus and didn’t attempt to deliver it to the recipient’s mail server.

        • BOUNCE - (Hard bounce) The recipient's mail server permanently rejected the email. (Soft bounces are only included when SES fails to deliver the email after retrying for a period of time.)

        • COMPLAINT - The email was successfully delivered to the recipient’s mail server, but the recipient marked it as spam.

        • DELIVERY - SES successfully delivered the email to the recipient's mail server.

        • OPEN - The recipient received the message and opened it in their email client.

        • CLICK - The recipient clicked one or more links in the email.

        • RENDERING_FAILURE - The email wasn't sent because of a template rendering issue. This event type can occur when template data is missing, or when there is a mismatch between template parameters and data. (This event type only occurs when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail API operations.)

        • DELIVERY_DELAY - The email couldn't be delivered to the recipient’s mail server because a temporary issue occurred. Delivery delays can occur, for example, when the recipient's inbox is full, or when the receiving email server experiences a transient issue.

        • SUBSCRIPTION - The email was successfully delivered, but the recipient updated their subscription preferences by clicking on an unsubscribe link as part of your subscription management.

        " + "documentation":"

        The types of events that Amazon SES sends to the specified event destinations.

        • SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.)

        • REJECT - SES accepted the email, but determined that it contained a virus and didn’t attempt to deliver it to the recipient’s mail server.

        • BOUNCE - (Hard bounce) The recipient's mail server permanently rejected the email. (Soft bounces are only included when SES fails to deliver the email after retrying for a period of time.)

        • COMPLAINT - The email was successfully delivered to the recipient’s mail server, but the recipient marked it as spam.

        • DELIVERY - SES successfully delivered the email to the recipient's mail server.

        • OPEN - The recipient received the message and opened it in their email client.

        • CLICK - The recipient clicked one or more links in the email.

        • RENDERING_FAILURE - The email wasn't sent because of a template rendering issue. This event type can occur when template data is missing, or when there is a mismatch between template parameters and data. (This event type only occurs when you send email using the SendEmail or SendBulkEmail API operations.)

        • DELIVERY_DELAY - The email couldn't be delivered to the recipient’s mail server because a temporary issue occurred. Delivery delays can occur, for example, when the recipient's inbox is full, or when the receiving email server experiences a transient issue.

        • SUBSCRIPTION - The email was successfully delivered, but the recipient updated their subscription preferences by clicking on an unsubscribe link as part of your subscription management.

        " }, "KinesisFirehoseDestination":{ "shape":"KinesisFirehoseDestination", @@ -3693,8 +3672,7 @@ "GeneralEnforcementStatus":{"type":"string"}, "GetAccountRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A request to obtain information about the email-sending capabilities of your Amazon SES account.

        " }, "GetAccountResponse":{ @@ -4065,8 +4043,7 @@ }, "GetDeliverabilityDashboardOptionsRequest":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Retrieve information about the status of the Deliverability dashboard for your Amazon Web Services account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.

        When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other Amazon Web Services services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

        " }, "GetDeliverabilityDashboardOptionsResponse":{ @@ -4718,8 +4695,7 @@ }, "InternalServiceErrorException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The request couldn't be processed because an error occurred with the Amazon SES API v2.

        ", "error":{"httpStatusCode":500}, "exception":true, @@ -4727,8 +4703,7 @@ }, "InvalidNextTokenException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified request includes an invalid or expired token.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -4823,8 +4798,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        There are too many instances of the specified resource type.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -5443,8 +5417,7 @@ }, "MailFromDomainNotVerifiedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The message can't be sent because the sending domain isn't verified.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -5611,8 +5584,7 @@ }, "MessageRejected":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The message can't be sent because it contains invalid content.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -5813,8 +5785,7 @@ }, "NotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource you attempted to access doesn't exist.

        ", "error":{"httpStatusCode":404}, "exception":true @@ -5926,8 +5897,7 @@ }, "PutAccountDedicatedIpWarmupAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutAccountDetailsRequest":{ @@ -5966,8 +5936,7 @@ }, "PutAccountDetailsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutAccountSendingAttributesRequest":{ @@ -5982,8 +5951,7 @@ }, "PutAccountSendingAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutAccountSuppressionAttributesRequest":{ @@ -5998,8 +5966,7 @@ }, "PutAccountSuppressionAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutAccountVdmAttributesRequest":{ @@ -6015,8 +5982,7 @@ }, "PutAccountVdmAttributesResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "PutConfigurationSetArchivingOptionsRequest":{ "type":"structure", @@ -6037,8 +6003,7 @@ }, "PutConfigurationSetArchivingOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetDeliveryOptionsRequest":{ @@ -6068,8 +6033,7 @@ }, "PutConfigurationSetDeliveryOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetReputationOptionsRequest":{ @@ -6091,8 +6055,7 @@ }, "PutConfigurationSetReputationOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetSendingOptionsRequest":{ @@ -6114,8 +6077,7 @@ }, "PutConfigurationSetSendingOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetSuppressionOptionsRequest":{ @@ -6137,8 +6099,7 @@ }, "PutConfigurationSetSuppressionOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetTrackingOptionsRequest":{ @@ -6161,8 +6122,7 @@ }, "PutConfigurationSetTrackingOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutConfigurationSetVdmOptionsRequest":{ @@ -6184,8 +6144,7 @@ }, "PutConfigurationSetVdmOptionsResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutDedicatedIpInPoolRequest":{ @@ -6210,8 +6169,7 @@ }, "PutDedicatedIpInPoolResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutDedicatedIpPoolScalingAttributesRequest":{ @@ -6236,8 +6194,7 @@ }, "PutDedicatedIpPoolScalingAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutDedicatedIpWarmupAttributesRequest":{ @@ -6262,8 +6219,7 @@ }, "PutDedicatedIpWarmupAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutDeliverabilityDashboardOptionRequest":{ @@ -6283,8 +6239,7 @@ }, "PutDeliverabilityDashboardOptionResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A response that indicates whether the Deliverability dashboard is enabled.

        " }, "PutEmailIdentityConfigurationSetAttributesRequest":{ @@ -6306,8 +6261,7 @@ }, "PutEmailIdentityConfigurationSetAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "PutEmailIdentityDkimAttributesRequest":{ @@ -6329,8 +6283,7 @@ }, "PutEmailIdentityDkimAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutEmailIdentityDkimSigningAttributesRequest":{ @@ -6390,8 +6343,7 @@ }, "PutEmailIdentityFeedbackAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutEmailIdentityMailFromAttributesRequest":{ @@ -6417,8 +6369,7 @@ }, "PutEmailIdentityMailFromAttributesResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "PutSuppressedDestinationRequest":{ @@ -6441,8 +6392,7 @@ }, "PutSuppressedDestinationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "QueryErrorCode":{ @@ -6872,8 +6822,7 @@ }, "SendingPausedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The message can't be sent because the account's ability to send email is currently paused.

        ", "error":{"httpStatusCode":400}, "exception":true @@ -7082,8 +7031,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{"type":"string"}, "Template":{ @@ -7091,7 +7039,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

        The name of the template. You will refer to this name when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail operations.

        " + "documentation":"

        The name of the template. You will refer to this name when you send email using the SendEmail or SendBulkEmail operations.

        " }, "TemplateArn":{ "shape":"AmazonResourceName", @@ -7166,8 +7114,7 @@ }, "TooManyRequestsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Too many requests have been made to the operation.

        ", "error":{"httpStatusCode":429}, "exception":true @@ -7279,8 +7226,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateConfigurationSetEventDestinationRequest":{ "type":"structure", @@ -7311,8 +7257,7 @@ }, "UpdateConfigurationSetEventDestinationResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "UpdateContactListRequest":{ @@ -7337,8 +7282,7 @@ }, "UpdateContactListResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateContactRequest":{ "type":"structure", @@ -7375,8 +7319,7 @@ }, "UpdateContactResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateCustomVerificationEmailTemplateRequest":{ "type":"structure", @@ -7420,8 +7363,7 @@ }, "UpdateCustomVerificationEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "UpdateEmailIdentityPolicyRequest":{ @@ -7453,8 +7395,7 @@ }, "UpdateEmailIdentityPolicyResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An HTTP 200 response if the request succeeds, or an error message if the request fails.

        " }, "UpdateEmailTemplateRequest":{ @@ -7479,8 +7420,7 @@ }, "UpdateEmailTemplateResponse":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

        " }, "UseCaseDescription":{ diff --git a/services/sfn/pom.xml b/services/sfn/pom.xml index defc55c6298d..ec591cf61cee 100644 --- a/services/sfn/pom.xml +++ b/services/sfn/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sfn AWS Java SDK :: Services :: AWS Step Functions diff --git a/services/sfn/src/main/resources/codegen-resources/customization.config b/services/sfn/src/main/resources/codegen-resources/customization.config index 285e5949f9e1..0df33f6080a1 100644 --- a/services/sfn/src/main/resources/codegen-resources/customization.config +++ b/services/sfn/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ ], "serviceSpecificHttpConfig": "software.amazon.awssdk.services.sfn.internal.SfnHttpConfigurationOptions", "generateEndpointClientTests": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/shield/pom.xml b/services/shield/pom.xml index d0d089e6be6c..49994df2ad6f 100644 --- a/services/shield/pom.xml +++ b/services/shield/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT shield AWS Java SDK :: Services :: AWS Shield diff --git a/services/shield/src/main/resources/codegen-resources/customization.config b/services/shield/src/main/resources/codegen-resources/customization.config index e58954101f46..7746e3896c44 100644 --- a/services/shield/src/main/resources/codegen-resources/customization.config +++ b/services/shield/src/main/resources/codegen-resources/customization.config @@ -17,6 +17,5 @@ "deprecatedOperations": [ "DeleteSubscription" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/signer/pom.xml b/services/signer/pom.xml index c7c5293597c0..319a9f3ae4fa 100644 --- a/services/signer/pom.xml +++ b/services/signer/pom.xml @@ -21,7 +21,7 
@@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT signer AWS Java SDK :: Services :: Signer diff --git a/services/signer/src/main/resources/codegen-resources/customization.config b/services/signer/src/main/resources/codegen-resources/customization.config index 48ddcd092254..5c2bd87be192 100644 --- a/services/signer/src/main/resources/codegen-resources/customization.config +++ b/services/signer/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "listSigningPlatforms", "listSigningProfiles" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/simspaceweaver/pom.xml b/services/simspaceweaver/pom.xml index 48e2716ab19a..2214b9132aee 100644 --- a/services/simspaceweaver/pom.xml +++ b/services/simspaceweaver/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT simspaceweaver AWS Java SDK :: Services :: Sim Space Weaver diff --git a/services/simspaceweaver/src/main/resources/codegen-resources/customization.config b/services/simspaceweaver/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/simspaceweaver/src/main/resources/codegen-resources/customization.config +++ b/services/simspaceweaver/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sms/pom.xml b/services/sms/pom.xml index da21d1a53e92..eb3d5d4421e9 100644 --- a/services/sms/pom.xml +++ b/services/sms/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sms AWS Java SDK :: Services :: AWS Server Migration diff --git a/services/sms/src/main/resources/codegen-resources/customization.config 
b/services/sms/src/main/resources/codegen-resources/customization.config index 4f7329022910..24fda7b429ef 100644 --- a/services/sms/src/main/resources/codegen-resources/customization.config +++ b/services/sms/src/main/resources/codegen-resources/customization.config @@ -10,6 +10,5 @@ "getServers", "listApps" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/snowball/pom.xml b/services/snowball/pom.xml index 3a3f366a5672..3fbab7e4eac2 100644 --- a/services/snowball/pom.xml +++ b/services/snowball/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT snowball AWS Java SDK :: Services :: Amazon Snowball diff --git a/services/snowball/src/main/resources/codegen-resources/customization.config b/services/snowball/src/main/resources/codegen-resources/customization.config index 3751f11ab0e2..54b6acdf321e 100644 --- a/services/snowball/src/main/resources/codegen-resources/customization.config +++ b/services/snowball/src/main/resources/codegen-resources/customization.config @@ -20,6 +20,5 @@ "excludedSimpleMethods": [ "createJob" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/snowdevicemanagement/pom.xml b/services/snowdevicemanagement/pom.xml index 99e69bb6b915..1161f76ee1e9 100644 --- a/services/snowdevicemanagement/pom.xml +++ b/services/snowdevicemanagement/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT snowdevicemanagement AWS Java SDK :: Services :: Snow Device Management diff --git a/services/snowdevicemanagement/src/main/resources/codegen-resources/customization.config b/services/snowdevicemanagement/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- 
a/services/snowdevicemanagement/src/main/resources/codegen-resources/customization.config +++ b/services/snowdevicemanagement/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sns/pom.xml b/services/sns/pom.xml index de0c5eb8e509..3a3e69de400c 100644 --- a/services/sns/pom.xml +++ b/services/sns/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sns AWS Java SDK :: Services :: Amazon SNS diff --git a/services/socialmessaging/pom.xml b/services/socialmessaging/pom.xml index 5fa377fa3810..c7b18018bdfc 100644 --- a/services/socialmessaging/pom.xml +++ b/services/socialmessaging/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT socialmessaging AWS Java SDK :: Services :: Social Messaging diff --git a/services/socialmessaging/src/main/resources/codegen-resources/customization.config b/services/socialmessaging/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/socialmessaging/src/main/resources/codegen-resources/customization.config +++ b/services/socialmessaging/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/sqs/pom.xml b/services/sqs/pom.xml index 9f5e8d1845a7..360082ae17ad 100644 --- a/services/sqs/pom.xml +++ b/services/sqs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sqs AWS Java SDK :: Services :: Amazon SQS diff --git a/services/sqs/src/main/resources/codegen-resources/customization.config b/services/sqs/src/main/resources/codegen-resources/customization.config index cea687a5d99d..1e0dd0cada26 100644 --- a/services/sqs/src/main/resources/codegen-resources/customization.config +++ 
b/services/sqs/src/main/resources/codegen-resources/customization.config @@ -12,6 +12,5 @@ } }, "enableGenerateCompiledEndpointRules": true, - "batchManagerSupported": true, - "enableFastUnmarshaller": true + "batchManagerSupported": true } diff --git a/services/ssm/pom.xml b/services/ssm/pom.xml index c0ec5876be34..cff3cc06a9a9 100644 --- a/services/ssm/pom.xml +++ b/services/ssm/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssm AWS Java SDK :: Services :: AWS Simple Systems Management (SSM) diff --git a/services/ssm/src/main/resources/codegen-resources/customization.config b/services/ssm/src/main/resources/codegen-resources/customization.config index 5d409b7f2bcf..926c0be479ff 100644 --- a/services/ssm/src/main/resources/codegen-resources/customization.config +++ b/services/ssm/src/main/resources/codegen-resources/customization.config @@ -26,6 +26,5 @@ "listComplianceItems", "describeMaintenanceWindowSchedule" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ssm/src/main/resources/codegen-resources/service-2.json b/services/ssm/src/main/resources/codegen-resources/service-2.json index 3192fe96beb8..f08a434fc821 100644 --- a/services/ssm/src/main/resources/codegen-resources/service-2.json +++ b/services/ssm/src/main/resources/codegen-resources/service-2.json @@ -157,7 +157,8 @@ {"shape":"InternalServerError"}, {"shape":"InvalidDocumentContent"}, {"shape":"DocumentLimitExceeded"}, - {"shape":"InvalidDocumentSchemaVersion"} + {"shape":"InvalidDocumentSchemaVersion"}, + {"shape":"TooManyUpdates"} ], "documentation":"

        Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs on your managed nodes. For more information about SSM documents, including information about supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the Amazon Web Services Systems Manager User Guide.

        " }, @@ -286,7 +287,8 @@ {"shape":"InternalServerError"}, {"shape":"InvalidDocument"}, {"shape":"InvalidDocumentOperation"}, - {"shape":"AssociatedInstances"} + {"shape":"AssociatedInstances"}, + {"shape":"TooManyUpdates"} ], "documentation":"

        Deletes the Amazon Web Services Systems Manager document (SSM document) and all managed node associations to the document.

        Before you delete the document, we recommend that you use DeleteAssociation to disassociate all managed nodes that are associated with the document.

        " }, @@ -2184,7 +2186,8 @@ {"shape":"InternalServerError"}, {"shape":"InvalidDocument"}, {"shape":"InvalidDocumentOperation"}, - {"shape":"InvalidDocumentVersion"} + {"shape":"InvalidDocumentVersion"}, + {"shape":"TooManyUpdates"} ], "documentation":"

        Updates information related to approval reviews for a specific version of a change template in Change Manager.

        " }, @@ -2358,6 +2361,13 @@ "Pending" ] }, + "AccessType":{ + "type":"string", + "enum":[ + "Standard", + "JustInTime" + ] + }, "Account":{"type":"string"}, "AccountId":{ "type":"string", @@ -2481,8 +2491,7 @@ }, "AddTagsToResourceResult":{ "type":"structure", - "members":{ - } + "members":{} }, "AgentErrorCode":{ "type":"string", @@ -2621,8 +2630,7 @@ }, "AssociatedInstances":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You must disassociate a document from all managed nodes before you can delete it.

        ", "exception":true }, @@ -2689,8 +2697,7 @@ }, "AssociationAlreadyExists":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified association already exists.

        ", "exception":true }, @@ -3078,8 +3085,7 @@ }, "AssociationLimitExceeded":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You can have at most 2,000 active associations.

        ", "exception":true }, @@ -3982,8 +3988,7 @@ }, "CancelCommandResult":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Whether or not the command was successfully canceled. There is no guarantee that a request can be canceled.

        " }, "CancelMaintenanceWindowExecutionRequest":{ @@ -5279,8 +5284,7 @@ }, "CreateResourceDataSyncResult":{ "type":"structure", - "members":{ - } + "members":{} }, "CreatedDate":{"type":"timestamp"}, "Credentials":{ @@ -5339,8 +5343,7 @@ }, "DeleteActivationResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAssociationRequest":{ "type":"structure", @@ -5361,8 +5364,7 @@ }, "DeleteAssociationResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDocumentRequest":{ "type":"structure", @@ -5388,8 +5390,7 @@ }, "DeleteDocumentResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteInventoryRequest":{ "type":"structure", @@ -5462,8 +5463,7 @@ }, "DeleteOpsItemResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteOpsMetadataRequest":{ "type":"structure", @@ -5477,8 +5477,7 @@ }, "DeleteOpsMetadataResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteParameterRequest":{ "type":"structure", @@ -5492,8 +5491,7 @@ }, "DeleteParameterResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteParametersRequest":{ "type":"structure", @@ -5553,8 +5551,7 @@ }, "DeleteResourceDataSyncResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteResourcePolicyRequest":{ "type":"structure", @@ -5580,8 +5577,7 @@ }, "DeleteResourcePolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeliveryTimedOutCount":{"type":"integer"}, "DeregisterManagedInstanceRequest":{ @@ -5596,8 +5592,7 @@ }, "DeregisterManagedInstanceResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeregisterPatchBaselineForPatchGroupRequest":{ "type":"structure", @@ -5972,7 +5967,7 @@ "members":{ "Name":{ "shape":"DocumentName", - "documentation":"

        The name of the document for which you are the owner.

        " + "documentation":"

        The name of the document for which you are the owner.

        " }, "PermissionType":{ "shape":"DocumentPermissionType", @@ -5994,7 +5989,7 @@ "members":{ "AccountIds":{ "shape":"AccountIdList", - "documentation":"

        The account IDs that have permission to use this document. The ID can be either an Amazon Web Services account or All.

        " + "documentation":"

        The account IDs that have permission to use this document. The ID can be either an Amazon Web Services account number or all.

        " }, "AccountSharingInfoList":{ "shape":"AccountSharingInfoList", @@ -6012,7 +6007,7 @@ "members":{ "Name":{ "shape":"DocumentARN", - "documentation":"

        The name of the SSM document.

        " + "documentation":"

        The name of the SSM document.

        If you're calling a shared SSM document from a different Amazon Web Services account, Name is the full Amazon Resource Name (ARN) of the document.

        " }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -6945,8 +6940,7 @@ }, "DisassociateOpsItemRelatedItemResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DocumentARN":{ "type":"string", @@ -7602,8 +7596,7 @@ }, "DuplicateInstanceId":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You can't specify a managed node ID in more than one association.

        ", "exception":true }, @@ -9409,7 +9402,7 @@ "members":{ "Key":{ "shape":"InstanceInformationStringFilterKey", - "documentation":"

        The filter key name to describe your managed nodes.

        Valid filter key values: ActivationIds | AgentVersion | AssociationStatus | IamRole | InstanceIds | PingStatus | PlatformTypes | ResourceType | SourceIds | SourceTypes | \"tag-key\" | \"tag:{keyname}

        • Valid values for the AssociationStatus filter key: Success | Pending | Failed

        • Valid values for the PingStatus filter key: Online | ConnectionLost | Inactive (deprecated)

        • Valid values for the PlatformType filter key: Windows | Linux | MacOS

        • Valid values for the ResourceType filter key: EC2Instance | ManagedInstance

        • Valid values for the SourceType filter key: AWS::EC2::Instance | AWS::SSM::ManagedInstance | AWS::IoT::Thing

        • Valid tag examples: Key=tag-key,Values=Purpose | Key=tag:Purpose,Values=Test.

        " + "documentation":"

        The filter key name to describe your managed nodes.

        Valid filter key values: ActivationIds | AgentVersion | AssociationStatus | IamRole | InstanceIds | PingStatus | PlatformType | ResourceType | SourceIds | SourceTypes | \"tag-key\" | \"tag:{keyname}

        • Valid values for the AssociationStatus filter key: Success | Pending | Failed

        • Valid values for the PingStatus filter key: Online | ConnectionLost | Inactive (deprecated)

        • Valid values for the PlatformType filter key: Windows | Linux | MacOS

        • Valid values for the ResourceType filter key: EC2Instance | ManagedInstance

        • Valid values for the SourceType filter key: AWS::EC2::Instance | AWS::SSM::ManagedInstance | AWS::IoT::Thing

        • Valid tag examples: Key=tag-key,Values=Purpose | Key=tag:Purpose,Values=Test.

        " }, "Values":{ "shape":"InstanceInformationFilterValueSet", @@ -9918,8 +9911,7 @@ }, "InvalidCommandId":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified command ID isn't valid. Verify the ID and try again.

        ", "exception":true }, @@ -10003,8 +9995,7 @@ }, "InvalidFilterKey":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified key isn't valid.

        ", "exception":true }, @@ -10118,15 +10109,13 @@ }, "InvalidOutputFolder":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The S3 bucket doesn't exist.

        ", "exception":true }, "InvalidOutputLocation":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The output location isn't valid or doesn't exist.

        ", "exception":true }, @@ -10148,8 +10137,7 @@ }, "InvalidPluginName":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The plugin name isn't valid.

        ", "exception":true }, @@ -10171,15 +10159,13 @@ }, "InvalidResourceId":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource ID isn't valid. Verify that you entered the correct ID and try again.

        ", "exception":true }, "InvalidResourceType":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The resource type isn't valid. For example, if you are attempting to tag an EC2 instance, the instance must be a registered managed node.

        ", "exception":true }, @@ -10656,8 +10642,7 @@ "InventoryTypeDisplayName":{"type":"string"}, "InvocationDoesNotExist":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The command ID and managed node ID you specified didn't match any invocations. Verify the command ID and the managed node ID and try again.

        ", "exception":true }, @@ -12234,8 +12219,7 @@ }, "ModifyDocumentPermissionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "NextToken":{"type":"string"}, "Node":{ @@ -14679,8 +14663,7 @@ }, "PutComplianceItemsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "PutInventoryMessage":{"type":"string"}, "PutInventoryRequest":{ @@ -15104,8 +15087,7 @@ }, "RemoveTagsFromResourceResult":{ "type":"structure", - "members":{ - } + "members":{} }, "RequireType":{ "type":"string", @@ -15818,8 +15800,7 @@ }, "SendAutomationSignalResult":{ "type":"structure", - "members":{ - } + "members":{} }, "SendCommandRequest":{ "type":"structure", @@ -16033,6 +16014,10 @@ "MaxSessionDuration":{ "shape":"MaxSessionDuration", "documentation":"

        The maximum duration of a session before it terminates.

        " + }, + "AccessType":{ + "shape":"AccessType", + "documentation":"

        Standard access type is the default for Session Manager sessions. JustInTime is the access type for Just-in-time node access.

        " } }, "documentation":"

        Information about a Session Manager connection to a managed node.

        " @@ -16068,7 +16053,8 @@ "Target", "Owner", "Status", - "SessionId" + "SessionId", + "AccessType" ] }, "SessionFilterList":{ @@ -16295,8 +16281,7 @@ }, "StartAssociationsOnceResult":{ "type":"structure", - "members":{ - } + "members":{} }, "StartAutomationExecutionRequest":{ "type":"structure", @@ -16517,8 +16502,7 @@ "StatusName":{"type":"string"}, "StatusUnchanged":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The updated status is the same as the current status.

        ", "exception":true }, @@ -16705,8 +16689,7 @@ }, "StopAutomationExecutionResult":{ "type":"structure", - "members":{ - } + "members":{} }, "StopType":{ "type":"string", @@ -16992,8 +16975,7 @@ }, "TooManyTagsError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The Targets parameter includes too many tags. Remove one or more tags and try the command again.

        ", "exception":true }, @@ -17298,8 +17280,7 @@ }, "UpdateDocumentMetadataResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDocumentRequest":{ "type":"structure", @@ -17697,8 +17678,7 @@ }, "UpdateManagedInstanceRoleResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateOpsItemRequest":{ "type":"structure", @@ -17772,8 +17752,7 @@ }, "UpdateOpsItemResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateOpsMetadataRequest":{ "type":"structure", @@ -17952,8 +17931,7 @@ }, "UpdateResourceDataSyncResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateServiceSettingRequest":{ "type":"structure", @@ -17975,8 +17953,7 @@ }, "UpdateServiceSettingResult":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The result body of the UpdateServiceSetting API operation.

        " }, "Url":{"type":"string"}, @@ -18006,5 +17983,5 @@ "pattern":"^[0-9]{1,6}(\\.[0-9]{1,6}){2,3}$" } }, - "documentation":"

        Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.

        This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.

        Related resources

        " + "documentation":"

        Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure end-to-end management solution for hybrid cloud environments that enables safe and secure operations at scale.

        This reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.

        Related resources

        " } diff --git a/services/ssmcontacts/pom.xml b/services/ssmcontacts/pom.xml index 44061c3b5ca4..3c318e50604c 100644 --- a/services/ssmcontacts/pom.xml +++ b/services/ssmcontacts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssmcontacts AWS Java SDK :: Services :: SSM Contacts diff --git a/services/ssmcontacts/src/main/resources/codegen-resources/customization.config b/services/ssmcontacts/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ssmcontacts/src/main/resources/codegen-resources/customization.config +++ b/services/ssmcontacts/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ssmguiconnect/pom.xml b/services/ssmguiconnect/pom.xml index c1a417a38968..b35589f3b6d9 100644 --- a/services/ssmguiconnect/pom.xml +++ b/services/ssmguiconnect/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssmguiconnect AWS Java SDK :: Services :: SSM Gui Connect diff --git a/services/ssmincidents/pom.xml b/services/ssmincidents/pom.xml index 199d78362a62..485f6e92b4c4 100644 --- a/services/ssmincidents/pom.xml +++ b/services/ssmincidents/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssmincidents AWS Java SDK :: Services :: SSM Incidents diff --git a/services/ssmincidents/src/main/resources/codegen-resources/customization.config b/services/ssmincidents/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ssmincidents/src/main/resources/codegen-resources/customization.config +++ b/services/ssmincidents/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - 
"enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ssmquicksetup/pom.xml b/services/ssmquicksetup/pom.xml index 89a6d6bd6f34..82526839dd1f 100644 --- a/services/ssmquicksetup/pom.xml +++ b/services/ssmquicksetup/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssmquicksetup AWS Java SDK :: Services :: SSM Quick Setup diff --git a/services/ssmquicksetup/src/main/resources/codegen-resources/customization.config b/services/ssmquicksetup/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/ssmquicksetup/src/main/resources/codegen-resources/customization.config +++ b/services/ssmquicksetup/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/ssmsap/pom.xml b/services/ssmsap/pom.xml index 703131223893..691d96faf54a 100644 --- a/services/ssmsap/pom.xml +++ b/services/ssmsap/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssmsap AWS Java SDK :: Services :: Ssm Sap diff --git a/services/ssmsap/src/main/resources/codegen-resources/customization.config b/services/ssmsap/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ssmsap/src/main/resources/codegen-resources/customization.config +++ b/services/ssmsap/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/sso/pom.xml b/services/sso/pom.xml index dd689a4060ed..28d40e9db206 100644 --- a/services/sso/pom.xml +++ b/services/sso/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sso AWS Java SDK :: Services :: SSO diff --git 
a/services/sso/src/main/resources/codegen-resources/customization.config b/services/sso/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/sso/src/main/resources/codegen-resources/customization.config +++ b/services/sso/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ssoadmin/pom.xml b/services/ssoadmin/pom.xml index 1710f8fe0e52..8a50f7203bfd 100644 --- a/services/ssoadmin/pom.xml +++ b/services/ssoadmin/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssoadmin AWS Java SDK :: Services :: SSO Admin diff --git a/services/ssoadmin/src/main/resources/codegen-resources/customization.config b/services/ssoadmin/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/ssoadmin/src/main/resources/codegen-resources/customization.config +++ b/services/ssoadmin/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/ssooidc/pom.xml b/services/ssooidc/pom.xml index 6bfcc4a9a7c4..68b612e88e42 100644 --- a/services/ssooidc/pom.xml +++ b/services/ssooidc/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ssooidc AWS Java SDK :: Services :: SSO OIDC diff --git a/services/ssooidc/src/main/resources/codegen-resources/customization.config b/services/ssooidc/src/main/resources/codegen-resources/customization.config index 7bf5ca8a7d7d..f0d68875fcfa 100644 --- a/services/ssooidc/src/main/resources/codegen-resources/customization.config +++ b/services/ssooidc/src/main/resources/codegen-resources/customization.config @@ -132,6 +132,5 @@ } ] 
} - }, - "enableFastUnmarshaller": true + } } \ No newline at end of file diff --git a/services/storagegateway/pom.xml b/services/storagegateway/pom.xml index 73252557c878..ba165f38c070 100644 --- a/services/storagegateway/pom.xml +++ b/services/storagegateway/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT storagegateway AWS Java SDK :: Services :: AWS Storage Gateway diff --git a/services/storagegateway/src/main/resources/codegen-resources/customization.config b/services/storagegateway/src/main/resources/codegen-resources/customization.config index de1c486b6e85..2383d1862b10 100644 --- a/services/storagegateway/src/main/resources/codegen-resources/customization.config +++ b/services/storagegateway/src/main/resources/codegen-resources/customization.config @@ -18,6 +18,5 @@ ] } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/storagegateway/src/main/resources/codegen-resources/service-2.json b/services/storagegateway/src/main/resources/codegen-resources/service-2.json index 2f98e4fa7cbe..8ada1dd5a92c 100644 --- a/services/storagegateway/src/main/resources/codegen-resources/service-2.json +++ b/services/storagegateway/src/main/resources/codegen-resources/service-2.json @@ -1134,7 +1134,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

        Starts generating a report of the file metadata currently cached by an S3 File Gateway for a specific file share. You can use this report to identify and resolve issues if you have files failing upload from your gateway to Amazon S3. The report is a CSV file containing a list of files which match the set of filter parameters you specify in the request.

        The Files Failing Upload flag is reset every 24 hours and during gateway reboot. If this report captures the files after the reset, but before they become flagged again, they will not be reported as Files Failing Upload.

        The following requirements must be met to successfully generate a cache report:

        • You must have permissions to list the entire Amazon S3 bucket associated with the specified file share.

        • No other cache reports can currently be in-progress for the specified file share.

        • There must be fewer than 10 existing cache reports for the specified file share.

        • The gateway must be online and connected to Amazon Web Services.

        • The root disk must have at least 20GB of free space when report generation starts.

        • You must specify at least one value for InclusionFilters or ExclusionFilters in the request.

        " + "documentation":"

        Starts generating a report of the file metadata currently cached by an S3 File Gateway for a specific file share. You can use this report to identify and resolve issues if you have files failing upload from your gateway to Amazon S3. The report is a CSV file containing a list of files which match the set of filter parameters you specify in the request.

        The Files Failing Upload flag is reset every 24 hours and during gateway reboot. If this report captures the files after the reset, but before they become flagged again, they will not be reported as Files Failing Upload.

        The following requirements must be met to successfully generate a cache report:

        • You must have s3:PutObject and s3:AbortMultipartUpload permissions for the Amazon S3 bucket where you want to store the cache report.

        • No other cache reports can currently be in-progress for the specified file share.

        • There must be fewer than 10 existing cache reports for the specified file share.

        • The gateway must be online and connected to Amazon Web Services.

        • The root disk must have at least 20GB of free space when report generation starts.

        • You must specify at least one value for InclusionFilters or ExclusionFilters in the request.

        " }, "StartGateway":{ "name":"StartGateway", @@ -1624,7 +1624,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " + "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Accepts IPv4 and IPv6 addresses. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " }, "DiskId":{ "shape":"DiskId", @@ -2112,7 +2112,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " + "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Accepts IPv4 and IPv6 addresses. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " }, "ClientToken":{ "shape":"ClientToken", @@ -2198,7 +2198,7 @@ }, "ClientList":{ "shape":"FileShareClientList", - "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IP addresses or valid CIDR blocks.

        " + "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IPv4/IPv6 addresses or valid CIDR blocks.

        " }, "Squash":{ "shape":"Squash", @@ -2492,7 +2492,7 @@ }, "NetworkInterfaceId":{ "shape":"NetworkInterfaceId", - "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " + "documentation":"

        The network interface of the gateway on which to expose the iSCSI target. Accepts IPv4 and IPv6 addresses. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.

        Valid Values: A valid IP address.

        " }, "KMSEncrypted":{ "shape":"Boolean", @@ -3899,8 +3899,8 @@ }, "FileShareClientList":{ "type":"list", - "member":{"shape":"IPV4AddressCIDR"}, - "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IP addresses or valid CIDR blocks.

        ", + "member":{"shape":"Ipv4OrIpv6AddressCIDR"}, + "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IPv4/IPv6 addresses or valid CIDR blocks.

        ", "max":100, "min":1 }, @@ -4166,8 +4166,8 @@ "Host":{ "type":"string", "max":1024, - "min":6, - "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(:(\\d+))?$" + "min":2, + "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(:(\\d+))?$|^(?:\\[(?:(?:(?:[A-Fa-f0-9]{1,4}:){6}|(?=(?:[A-Fa-f0-9]{0,4}:){0,6}(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){0,5}|:)(?:(?::[0-9A-Fa-f]{1,4}){1,5}:|:)|::(?:[A-Fa-f0-9]{1,4}:){5})(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:[A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}|(?=(?:[A-Fa-f0-9]{0,4}:){0,7}[A-Fa-f0-9]{0,4}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){1,7}|:)(?:(:[0-9A-Fa-f]{1,4}){1,7}|:)|(?:[A-Fa-f0-9]{1,4}:){7}:|:(:[A-Fa-f0-9]{1,4}){7})\\]:\\d+$|^(?:(?:(?:[A-Fa-f0-9]{1,4}:){6}|(?=(?:[A-Fa-f0-9]{0,4}:){0,6}(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){0,5}|:)(?:(?::[0-9A-Fa-f]{1,4}){1,5}:|:)|::(?:[A-Fa-f0-9]{1,4}:){5})(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:[A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}|(?=(?:[A-Fa-f0-9]{0,4}:){0,7}[A-Fa-f0-9]{0,4}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){1,7}|:)(?:(:[0-9A-Fa-f]{1,4}){1,7}|:)|(?:[A-Fa-f0-9]{1,4}:){7}:|:(:[A-Fa-f0-9]{1,4}){7})$)" }, "HostEnvironment":{ "type":"string", @@ -4200,10 +4200,6 @@ "min":7, "pattern":"^((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])(\\.(?!$)|$)){4}" }, - "IPV4AddressCIDR":{ - "type":"string", - "pattern":"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\/([0-9]|[1-2][0-9]|3[0-2]))?$" - }, "Initiator":{ "type":"string", "max":50, @@ -4249,6 +4245,10 @@ "max":1, "min":0 }, + "Ipv4OrIpv6AddressCIDR":{ + "type":"string", + 
"pattern":"^(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(?:\\/(?:[0-9]|[1-2][0-9]|3[0-2]))?$|^(?:(?:(?:[A-Fa-f0-9]{1,4}:){6}|(?=(?:[A-Fa-f0-9]{0,4}:){0,6}(?:[0-9]{1,3}\\.){3}[0-9]{1,3}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){0,5}|:)(?:(?::[0-9A-Fa-f]{1,4}){1,5}:|:)|::(?:[A-Fa-f0-9]{1,4}:){5})(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:[A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}|(?=(?:[A-Fa-f0-9]{0,4}:){0,7}[A-Fa-f0-9]{0,4}(?![:.\\w]))(?:(?:[0-9A-Fa-f]{1,4}:){1,7}|:)(?:(:[0-9A-Fa-f]{1,4}){1,7}|:)|(?:[A-Fa-f0-9]{1,4}:){7}:|:(:[A-Fa-f0-9]{1,4}){7})(?:\\/(?:12[0-8]|1[01][0-9]|[1-9]?[0-9]))?$" + }, "IqnName":{ "type":"string", "max":255, @@ -4278,7 +4278,7 @@ }, "DomainControllers":{ "shape":"Hosts", - "documentation":"

        List of IPv4 addresses, NetBIOS names, or host names of your domain server. If you need to specify the port number include it after the colon (“:”). For example, mydc.mydomain.com:389.

        " + "documentation":"

        List of IP addresses, NetBIOS names, or host names of your domain server. If you need to specify the port number include it after the colon (“:”). For example, mydc.mydomain.com:389.

        S3 File Gateway supports IPv6 addresses in addition to IPv4 and other existing formats.

        FSx File Gateway does not support IPv6.

        " }, "TimeoutInSeconds":{ "shape":"TimeoutInSeconds", @@ -4775,16 +4775,13 @@ }, "Ipv6Address":{ "shape":"string", - "documentation":"

        The Internet Protocol version 6 (IPv6) address of the interface. Currently not supported.

        " + "documentation":"

        The Internet Protocol version 6 (IPv6) address of the interface.

        This element returns IPv6 addresses for all gateway types except FSx File Gateway.

        " } }, "documentation":"

        Describes a gateway's network interface.

        ", "sensitive":true }, - "NetworkInterfaceId":{ - "type":"string", - "pattern":"\\A(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}\\z" - }, + "NetworkInterfaceId":{"type":"string"}, "NextUpdateAvailabilityDate":{ "type":"string", "max":25, @@ -6023,7 +6020,7 @@ }, "ClientList":{ "shape":"FileShareClientList", - "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IP addresses or valid CIDR blocks.

        " + "documentation":"

        The list of clients that are allowed to access the S3 File Gateway. The list must contain either valid IPv4/IPv6 addresses or valid CIDR blocks.

        " }, "Squash":{ "shape":"Squash", diff --git a/services/sts/pom.xml b/services/sts/pom.xml index 2b6fae6307a8..ec6e981ae18d 100644 --- a/services/sts/pom.xml +++ b/services/sts/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT sts AWS Java SDK :: Services :: AWS STS diff --git a/services/sts/src/main/resources/codegen-resources/paginators-1.json b/services/sts/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/sts/src/main/resources/codegen-resources/paginators-1.json +++ b/services/sts/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/sts/src/main/resources/codegen-resources/service-2.json b/services/sts/src/main/resources/codegen-resources/service-2.json index a3fabadccb89..a91e98c4af33 100644 --- a/services/sts/src/main/resources/codegen-resources/service-2.json +++ b/services/sts/src/main/resources/codegen-resources/service-2.json @@ -53,7 +53,9 @@ {"shape":"ExpiredTokenException"}, {"shape":"RegionDisabledException"} ], - "documentation":"

        Returns a set of temporary security credentials for users who have been authenticated via a SAML authentication response. This operation provides a mechanism for tying an enterprise identity store or directory to role-based Amazon Web Services access without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML with the other API operations that produce temporary credentials, see Requesting Temporary Security Credentials and Compare STS credentials in the IAM User Guide.

        The temporary security credentials returned by this operation consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to Amazon Web Services services.

        Session Duration

        By default, the temporary security credentials created by AssumeRoleWithSAML last for one hour. However, you can use the optional DurationSeconds parameter to specify the duration of your session. Your role session lasts for the duration that you specify, or until the time specified in the SAML authentication response's SessionNotOnOrAfter value, whichever is shorter. You can provide a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session duration setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how to view the maximum value for your role, see View the Maximum Session Duration Setting for a Role in the IAM User Guide. The maximum session duration limit applies when you use the AssumeRole* API operations or the assume-role* CLI commands. However the limit does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

        Role chaining limits your CLI or Amazon Web Services API role session to a maximum of one hour. When you use the AssumeRole API operation to assume a role, you can specify the duration of your role session with the DurationSeconds parameter. You can specify a parameter value of up to 43200 seconds (12 hours), depending on the maximum session duration setting for your role. However, if you assume a role using role chaining and provide a DurationSeconds parameter value greater than one hour, the operation fails.

        Permissions

        The temporary security credentials created by AssumeRoleWithSAML can be used to make API calls to any Amazon Web Services service with the following exception: you cannot call the STS GetFederationToken or GetSessionToken API operations.

        (Optional) You can pass inline or managed session policies to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed session policies. The plaintext that you use for both inline and managed session policies can't exceed 2,048 characters. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent Amazon Web Services API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.

        Calling AssumeRoleWithSAML does not require the use of Amazon Web Services security credentials. The identity of the caller is validated by using keys in the metadata document that is uploaded for the SAML provider entity for your identity provider.

        Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The entry includes the value in the NameID element of the SAML assertion. We recommend that you use a NameIDType that is not associated with any personally identifiable information (PII). For example, you could instead use the persistent identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).

        Tags

        (Optional) You can configure your IdP to pass attributes into your SAML assertion as session tags. Each session tag consists of a key name and an associated value. For more information about session tags, see Passing Session Tags in STS in the IAM User Guide.

        You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide.

        An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit.

        You can pass a session tag with the same key as a tag that is attached to the role. When you do, session tags override the role's tags with the same key.

        An administrator must grant you the permissions necessary to pass session tags. The administrator can also create granular permissions to allow you to pass only specific session tags. For more information, see Tutorial: Using Tags for Attribute-Based Access Control in the IAM User Guide.

        You can set the session tags as transitive. Transitive tags persist during role chaining. For more information, see Chaining Roles with Session Tags in the IAM User Guide.

        SAML Configuration

        Before your application can call AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to issue the claims required by Amazon Web Services. Additionally, you must use Identity and Access Management (IAM) to create a SAML provider entity in your Amazon Web Services account that represents your identity provider. You must also create an IAM role that specifies this SAML provider in its trust policy.

        For more information, see the following resources:

        " + "documentation":"

        Returns a set of temporary security credentials for users who have been authenticated via a SAML authentication response. This operation provides a mechanism for tying an enterprise identity store or directory to role-based Amazon Web Services access without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML with the other API operations that produce temporary credentials, see Requesting Temporary Security Credentials and Compare STS credentials in the IAM User Guide.

        The temporary security credentials returned by this operation consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to Amazon Web Services services.

        Session Duration

        By default, the temporary security credentials created by AssumeRoleWithSAML last for one hour. However, you can use the optional DurationSeconds parameter to specify the duration of your session. Your role session lasts for the duration that you specify, or until the time specified in the SAML authentication response's SessionNotOnOrAfter value, whichever is shorter. You can provide a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session duration setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how to view the maximum value for your role, see View the Maximum Session Duration Setting for a Role in the IAM User Guide. The maximum session duration limit applies when you use the AssumeRole* API operations or the assume-role* CLI commands. However, the limit does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

        Role chaining limits your CLI or Amazon Web Services API role session to a maximum of one hour. When you use the AssumeRole API operation to assume a role, you can specify the duration of your role session with the DurationSeconds parameter. You can specify a parameter value of up to 43200 seconds (12 hours), depending on the maximum session duration setting for your role. However, if you assume a role using role chaining and provide a DurationSeconds parameter value greater than one hour, the operation fails.

        Permissions

        The temporary security credentials created by AssumeRoleWithSAML can be used to make API calls to any Amazon Web Services service with the following exception: you cannot call the STS GetFederationToken or GetSessionToken API operations.

        (Optional) You can pass inline or managed session policies to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed session policies. The plaintext that you use for both inline and managed session policies can't exceed 2,048 characters. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent Amazon Web Services API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.

        Calling AssumeRoleWithSAML does not require the use of Amazon Web Services security credentials. The identity of the caller is validated by using keys in the metadata document that is uploaded for the SAML provider entity for your identity provider.

        Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The entry includes the value in the NameID element of the SAML assertion. We recommend that you use a NameIDType that is not associated with any personally identifiable information (PII). For example, you could instead use the persistent identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).

        Tags

        (Optional) You can configure your IdP to pass attributes into your SAML assertion as session tags. Each session tag consists of a key name and an associated value. For more information about session tags, see Passing Session Tags in STS in the IAM User Guide.

        You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide.

        An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit.

        You can pass a session tag with the same key as a tag that is attached to the role. When you do, session tags override the role's tags with the same key.

        An administrator must grant you the permissions necessary to pass session tags. The administrator can also create granular permissions to allow you to pass only specific session tags. For more information, see Tutorial: Using Tags for Attribute-Based Access Control in the IAM User Guide.

        You can set the session tags as transitive. Transitive tags persist during role chaining. For more information, see Chaining Roles with Session Tags in the IAM User Guide.

        SAML Configuration

        Before your application can call AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to issue the claims required by Amazon Web Services. Additionally, you must use Identity and Access Management (IAM) to create a SAML provider entity in your Amazon Web Services account that represents your identity provider. You must also create an IAM role that specifies this SAML provider in its trust policy.

        For more information, see the following resources:

        ", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "AssumeRoleWithWebIdentity":{ "name":"AssumeRoleWithWebIdentity", @@ -75,7 +77,9 @@ {"shape":"ExpiredTokenException"}, {"shape":"RegionDisabledException"} ], - "documentation":"

        Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider. Example providers include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID Connect-compatible identity provider such as Google or Amazon Cognito federated identities.

        For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely identify a user. You can also supply the user with a consistent identity throughout the lifetime of an application.

        To learn more about Amazon Cognito, see Amazon Cognito identity pools in Amazon Cognito Developer Guide.

        Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web Services security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term Amazon Web Services credentials in the application. You also don't need to deploy server-based proxy services that use long-term Amazon Web Services credentials. Instead, the identity of the caller is validated by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity with the other API operations that produce temporary credentials, see Requesting Temporary Security Credentials and Compare STS credentials in the IAM User Guide.

        The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to Amazon Web Services service API operations.

        Session Duration

        By default, the temporary security credentials created by AssumeRoleWithWebIdentity last for one hour. However, you can use the optional DurationSeconds parameter to specify the duration of your session. You can provide a value from 900 seconds (15 minutes) up to the maximum session duration setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how to view the maximum value for your role, see Update the maximum session duration for a role in the IAM User Guide. The maximum session duration limit applies when you use the AssumeRole* API operations or the assume-role* CLI commands. However the limit does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

        Permissions

        The temporary security credentials created by AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web Services service with the following exception: you cannot call the STS GetFederationToken or GetSessionToken API operations.

        (Optional) You can pass inline or managed session policies to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed session policies. The plaintext that you use for both inline and managed session policies can't exceed 2,048 characters. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent Amazon Web Services API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.

        Tags

        (Optional) You can configure your IdP to pass attributes into your web identity token as session tags. Each session tag consists of a key name and an associated value. For more information about session tags, see Passing Session Tags in STS in the IAM User Guide.

        You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide.

        An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit.

        You can pass a session tag with the same key as a tag that is attached to the role. When you do, the session tag overrides the role tag with the same key.

        An administrator must grant you the permissions necessary to pass session tags. The administrator can also create granular permissions to allow you to pass only specific session tags. For more information, see Tutorial: Using Tags for Attribute-Based Access Control in the IAM User Guide.

        You can set the session tags as transitive. Transitive tags persist during role chaining. For more information, see Chaining Roles with Session Tags in the IAM User Guide.

        Identities

        Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.

        Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail logs. The entry includes the Subject of the provided web identity token. We recommend that you avoid using any personally identifiable information (PII) in this field. For example, you could instead use a GUID or a pairwise identifier, as suggested in the OIDC specification.

        For more information about how to use OIDC federation and the AssumeRoleWithWebIdentity API, see the following resources:

        " + "documentation":"

        Returns a set of temporary security credentials for users who have been authenticated in a mobile or web application with a web identity provider. Example providers include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID Connect-compatible identity provider such as Google or Amazon Cognito federated identities.

        For mobile applications, we recommend that you use Amazon Cognito. You can use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide and the Amazon Web Services SDK for Android Developer Guide to uniquely identify a user. You can also supply the user with a consistent identity throughout the lifetime of an application.

        To learn more about Amazon Cognito, see Amazon Cognito identity pools in Amazon Cognito Developer Guide.

        Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web Services security credentials. Therefore, you can distribute an application (for example, on mobile devices) that requests temporary security credentials without including long-term Amazon Web Services credentials in the application. You also don't need to deploy server-based proxy services that use long-term Amazon Web Services credentials. Instead, the identity of the caller is validated by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity with the other API operations that produce temporary credentials, see Requesting Temporary Security Credentials and Compare STS credentials in the IAM User Guide.

        The temporary security credentials returned by this API consist of an access key ID, a secret access key, and a security token. Applications can use these temporary security credentials to sign calls to Amazon Web Services service API operations.

        Session Duration

        By default, the temporary security credentials created by AssumeRoleWithWebIdentity last for one hour. However, you can use the optional DurationSeconds parameter to specify the duration of your session. You can provide a value from 900 seconds (15 minutes) up to the maximum session duration setting for the role. This setting can have a value from 1 hour to 12 hours. To learn how to view the maximum value for your role, see Update the maximum session duration for a role in the IAM User Guide. The maximum session duration limit applies when you use the AssumeRole* API operations or the assume-role* CLI commands. However, the limit does not apply when you use those operations to create a console URL. For more information, see Using IAM Roles in the IAM User Guide.

        Permissions

        The temporary security credentials created by AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web Services service with the following exception: you cannot call the STS GetFederationToken or GetSessionToken API operations.

        (Optional) You can pass inline or managed session policies to this operation. You can pass a single JSON policy document to use as an inline session policy. You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed session policies. The plaintext that you use for both inline and managed session policies can't exceed 2,048 characters. Passing policies to this operation returns new temporary credentials. The resulting session's permissions are the intersection of the role's identity-based policy and the session policies. You can use the role's temporary credentials in subsequent Amazon Web Services API calls to access resources in the account that owns the role. You cannot use session policies to grant more permissions than those allowed by the identity-based policy of the role that is being assumed. For more information, see Session Policies in the IAM User Guide.

        Tags

        (Optional) You can configure your IdP to pass attributes into your web identity token as session tags. Each session tag consists of a key name and an associated value. For more information about session tags, see Passing Session Tags in STS in the IAM User Guide.

        You can pass up to 50 session tags. The plaintext session tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. For these and additional limits, see IAM and STS Character Limits in the IAM User Guide.

        An Amazon Web Services conversion compresses the passed inline session policy, managed policy ARNs, and session tags into a packed binary format that has a separate limit. Your request can fail for this limit even if your plaintext meets the other requirements. The PackedPolicySize response element indicates by percentage how close the policies and tags for your request are to the upper size limit.

        You can pass a session tag with the same key as a tag that is attached to the role. When you do, the session tag overrides the role tag with the same key.

        An administrator must grant you the permissions necessary to pass session tags. The administrator can also create granular permissions to allow you to pass only specific session tags. For more information, see Tutorial: Using Tags for Attribute-Based Access Control in the IAM User Guide.

        You can set the session tags as transitive. Transitive tags persist during role chaining. For more information, see Chaining Roles with Session Tags in the IAM User Guide.

        Identities

        Before your application can call AssumeRoleWithWebIdentity, you must have an identity token from a supported identity provider and create a role that the application can assume. The role that your application assumes must trust the identity provider that is associated with the identity token. In other words, the identity provider must be specified in the role's trust policy.

        Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail logs. The entry includes the Subject of the provided web identity token. We recommend that you avoid using any personally identifiable information (PII) in this field. For example, you could instead use a GUID or a pairwise identifier, as suggested in the OIDC specification.

        For more information about how to use OIDC federation and the AssumeRoleWithWebIdentity API, see the following resources:

        ", + "authtype":"none", + "auth":["smithy.api#noAuth"] }, "AssumeRoot":{ "name":"AssumeRoot", @@ -552,8 +556,7 @@ }, "GetCallerIdentityRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetCallerIdentityResponse":{ "type":"structure", diff --git a/services/supplychain/pom.xml b/services/supplychain/pom.xml index e8a674df3f4d..0bfcaefb2f74 100644 --- a/services/supplychain/pom.xml +++ b/services/supplychain/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT supplychain AWS Java SDK :: Services :: Supply Chain diff --git a/services/supplychain/src/main/resources/codegen-resources/customization.config b/services/supplychain/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/supplychain/src/main/resources/codegen-resources/customization.config +++ b/services/supplychain/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/supplychain/src/main/resources/codegen-resources/paginators-1.json b/services/supplychain/src/main/resources/codegen-resources/paginators-1.json index 8ca1db6b5421..6f4bb39d7a67 100644 --- a/services/supplychain/src/main/resources/codegen-resources/paginators-1.json +++ b/services/supplychain/src/main/resources/codegen-resources/paginators-1.json @@ -1,5 +1,17 @@ { "pagination": { + "ListDataIntegrationEvents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "events" + }, + "ListDataIntegrationFlowExecutions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "flowExecutions" + }, "ListDataIntegrationFlows": { "input_token": "nextToken", "output_token": "nextToken", @@ -12,6 +24,12 @@ "limit_key": "maxResults", "result_key": "datasets" 
}, + "ListDataLakeNamespaces": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "namespaces" + }, "ListInstances": { "input_token": "nextToken", "output_token": "nextToken", diff --git a/services/supplychain/src/main/resources/codegen-resources/service-2.json b/services/supplychain/src/main/resources/codegen-resources/service-2.json index af66f568f0e1..c87dd1fe9e39 100644 --- a/services/supplychain/src/main/resources/codegen-resources/service-2.json +++ b/services/supplychain/src/main/resources/codegen-resources/service-2.json @@ -66,8 +66,8 @@ "output":{"shape":"CreateDataLakeDatasetResponse"}, "errors":[ {"shape":"ServiceQuotaExceededException"}, - {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"InternalServerException"}, @@ -76,6 +76,27 @@ "documentation":"

        Enables you to programmatically create an Amazon Web Services Supply Chain data lake dataset. Developers can create the datasets using their pre-defined or custom schema for a given instance ID, namespace, and dataset name.

        ", "idempotent":true }, + "CreateDataLakeNamespace":{ + "name":"CreateDataLakeNamespace", + "http":{ + "method":"PUT", + "requestUri":"/api/datalake/instance/{instanceId}/namespaces/{name}", + "responseCode":200 + }, + "input":{"shape":"CreateDataLakeNamespaceRequest"}, + "output":{"shape":"CreateDataLakeNamespaceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically create an Amazon Web Services Supply Chain data lake namespace. Developers can create the namespaces for a given instance ID.

        ", + "idempotent":true + }, "CreateInstance":{ "name":"CreateInstance", "http":{ @@ -139,6 +160,27 @@ "documentation":"

        Enables you to programmatically delete an Amazon Web Services Supply Chain data lake dataset. Developers can delete the existing datasets for a given instance ID, namespace, and dataset name.

        ", "idempotent":true }, + "DeleteDataLakeNamespace":{ + "name":"DeleteDataLakeNamespace", + "http":{ + "method":"DELETE", + "requestUri":"/api/datalake/instance/{instanceId}/namespaces/{name}", + "responseCode":200 + }, + "input":{"shape":"DeleteDataLakeNamespaceRequest"}, + "output":{"shape":"DeleteDataLakeNamespaceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically delete an Amazon Web Services Supply Chain data lake namespace and its underlying datasets. Developers can delete the existing namespaces for a given instance ID and namespace name.

        ", + "idempotent":true + }, "DeleteInstance":{ "name":"DeleteInstance", "http":{ @@ -180,6 +222,26 @@ ], "documentation":"

        Get status and details of a BillOfMaterialsImportJob.

        " }, + "GetDataIntegrationEvent":{ + "name":"GetDataIntegrationEvent", + "http":{ + "method":"GET", + "requestUri":"/api-data/data-integration/instance/{instanceId}/data-integration-events/{eventId}", + "responseCode":200 + }, + "input":{"shape":"GetDataIntegrationEventRequest"}, + "output":{"shape":"GetDataIntegrationEventResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically view an Amazon Web Services Supply Chain Data Integration Event. Developers can view the eventType, eventGroupId, eventTimestamp, datasetTarget, datasetLoadExecution.

        " + }, "GetDataIntegrationFlow":{ "name":"GetDataIntegrationFlow", "http":{ @@ -200,6 +262,26 @@ ], "documentation":"

        Enables you to programmatically view a specific data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name.

        " }, + "GetDataIntegrationFlowExecution":{ + "name":"GetDataIntegrationFlowExecution", + "http":{ + "method":"GET", + "requestUri":"/api-data/data-integration/instance/{instanceId}/data-integration-flows/{flowName}/executions/{executionId}", + "responseCode":200 + }, + "input":{"shape":"GetDataIntegrationFlowExecutionRequest"}, + "output":{"shape":"GetDataIntegrationFlowExecutionResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Get the flow execution.

        " + }, "GetDataLakeDataset":{ "name":"GetDataLakeDataset", "http":{ @@ -220,6 +302,26 @@ ], "documentation":"

        Enables you to programmatically view an Amazon Web Services Supply Chain data lake dataset. Developers can view the data lake dataset information such as namespace, schema, and so on for a given instance ID, namespace, and dataset name.

        " }, + "GetDataLakeNamespace":{ + "name":"GetDataLakeNamespace", + "http":{ + "method":"GET", + "requestUri":"/api/datalake/instance/{instanceId}/namespaces/{name}", + "responseCode":200 + }, + "input":{"shape":"GetDataLakeNamespaceRequest"}, + "output":{"shape":"GetDataLakeNamespaceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically view an Amazon Web Services Supply Chain data lake namespace. Developers can view the data lake namespace information such as description for a given instance ID and namespace name.

        " + }, "GetInstance":{ "name":"GetInstance", "http":{ @@ -240,6 +342,46 @@ ], "documentation":"

        Enables you to programmatically retrieve the information related to an Amazon Web Services Supply Chain instance ID.

        " }, + "ListDataIntegrationEvents":{ + "name":"ListDataIntegrationEvents", + "http":{ + "method":"GET", + "requestUri":"/api-data/data-integration/instance/{instanceId}/data-integration-events", + "responseCode":200 + }, + "input":{"shape":"ListDataIntegrationEventsRequest"}, + "output":{"shape":"ListDataIntegrationEventsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically list all data integration events for the provided Amazon Web Services Supply Chain instance.

        " + }, + "ListDataIntegrationFlowExecutions":{ + "name":"ListDataIntegrationFlowExecutions", + "http":{ + "method":"GET", + "requestUri":"/api-data/data-integration/instance/{instanceId}/data-integration-flows/{flowName}/executions", + "responseCode":200 + }, + "input":{"shape":"ListDataIntegrationFlowExecutionsRequest"}, + "output":{"shape":"ListDataIntegrationFlowExecutionsResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        List flow executions.

        " + }, "ListDataIntegrationFlows":{ "name":"ListDataIntegrationFlows", "http":{ @@ -280,6 +422,26 @@ ], "documentation":"

        Enables you to programmatically view the list of Amazon Web Services Supply Chain data lake datasets. Developers can view the datasets and the corresponding information such as namespace, schema, and so on for a given instance ID and namespace.

        " }, + "ListDataLakeNamespaces":{ + "name":"ListDataLakeNamespaces", + "http":{ + "method":"GET", + "requestUri":"/api/datalake/instance/{instanceId}/namespaces", + "responseCode":200 + }, + "input":{"shape":"ListDataLakeNamespacesRequest"}, + "output":{"shape":"ListDataLakeNamespacesResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically view the list of Amazon Web Services Supply Chain data lake namespaces. Developers can view the namespaces and the corresponding information such as description for a given instance ID. Note that this API only returns custom namespaces; instance pre-defined namespaces are not included.

        " + }, "ListInstances":{ "name":"ListInstances", "http":{ @@ -338,7 +500,7 @@ {"shape":"InternalServerException"}, {"shape":"ConflictException"} ], - "documentation":"

        Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion.

        ", + "documentation":"

        Send the data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake.

        ", "idempotent":true }, "TagResource":{ @@ -422,6 +584,26 @@ ], "documentation":"

        Enables you to programmatically update an Amazon Web Services Supply Chain data lake dataset. Developers can update the description of a data lake dataset for a given instance ID, namespace, and dataset name.

        " }, + "UpdateDataLakeNamespace":{ + "name":"UpdateDataLakeNamespace", + "http":{ + "method":"PATCH", + "requestUri":"/api/datalake/instance/{instanceId}/namespaces/{name}", + "responseCode":200 + }, + "input":{"shape":"UpdateDataLakeNamespaceRequest"}, + "output":{"shape":"UpdateDataLakeNamespaceResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Enables you to programmatically update an Amazon Web Services Supply Chain data lake namespace. Developers can update the description of a data lake namespace for a given instance ID and namespace name.

        " + }, "UpdateInstance":{ "name":"UpdateInstance", "http":{ @@ -646,8 +828,8 @@ "locationName":"instanceId" }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset.

        ", + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        ", "location":"uri", "locationName":"namespace" }, @@ -659,12 +841,16 @@ }, "schema":{ "shape":"DataLakeDatasetSchema", - "documentation":"

        The custom schema of the data lake dataset and is only required when the name space is default.

        " + "documentation":"

        The custom schema of the data lake dataset and required for dataset in default and custom namespaces.

        " }, "description":{ "shape":"DataLakeDatasetDescription", "documentation":"

        The description of the dataset.

        " }, + "partitionSpec":{ + "shape":"DataLakeDatasetPartitionSpec", + "documentation":"

        The partition specification of the dataset. Partitioning can effectively improve the dataset query performance by reducing the amount of data scanned during query execution. But partitioning or not will affect how data get ingested by data ingestion methods, such as SendDataIntegrationEvent's dataset UPSERT will upsert records within partition (instead of within whole dataset). For more details, refer to those data ingestion documentations.

        " + }, "tags":{ "shape":"TagMap", "documentation":"

        The tags of the dataset.

        " @@ -683,6 +869,47 @@ }, "documentation":"

        The response parameters of CreateDataLakeDataset.

        " }, + "CreateDataLakeNamespaceRequest":{ + "type":"structure", + "required":[ + "instanceId", + "name" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of the namespace. Note that you cannot create a namespace with a name starting with asc, default, scn, aws, amazon, or amzn.

        ", + "location":"uri", + "locationName":"name" + }, + "description":{ + "shape":"DataLakeNamespaceDescription", + "documentation":"

        The description of the namespace.

        " + }, + "tags":{ + "shape":"TagMap", + "documentation":"

        The tags of the namespace.

        " + } + }, + "documentation":"

        The request parameters for CreateDataLakeNamespace.

        " + }, + "CreateDataLakeNamespaceResponse":{ + "type":"structure", + "required":["namespace"], + "members":{ + "namespace":{ + "shape":"DataLakeNamespace", + "documentation":"

        The detail of created namespace.

        " + } + }, + "documentation":"

        The response parameters of CreateDataLakeNamespace.

        " + }, "CreateInstanceRequest":{ "type":"structure", "members":{ @@ -725,17 +952,147 @@ }, "documentation":"

        The response parameters for CreateInstance.

        " }, + "DataIntegrationDatasetArn":{ + "type":"string", + "max":1011, + "min":20, + "pattern":"arn:aws:scn:([a-z0-9-]+):([0-9]+):instance/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/namespaces/[^/]+/datasets/[^/]+" + }, + "DataIntegrationEvent":{ + "type":"structure", + "required":[ + "instanceId", + "eventId", + "eventType", + "eventGroupId", + "eventTimestamp" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The AWS Supply Chain instance identifier.

        " + }, + "eventId":{ + "shape":"UUID", + "documentation":"

        The unique event identifier.

        " + }, + "eventType":{ + "shape":"DataIntegrationEventType", + "documentation":"

        The data event type.

        " + }, + "eventGroupId":{ + "shape":"DataIntegrationEventGroupId", + "documentation":"

        Event identifier (for example, orderId for InboundOrder) used for data sharding or partitioning.

        " + }, + "eventTimestamp":{ + "shape":"Timestamp", + "documentation":"

        The event timestamp (in epoch seconds).

        " + }, + "datasetTargetDetails":{ + "shape":"DataIntegrationEventDatasetTargetDetails", + "documentation":"

        The target dataset details for a DATASET event type.

        " + } + }, + "documentation":"

        The data integration event details.

        " + }, "DataIntegrationEventData":{ "type":"string", "max":1048576, "min":1, "sensitive":true }, + "DataIntegrationEventDatasetLoadExecutionDetails":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"DataIntegrationEventDatasetLoadStatus", + "documentation":"

        The event load execution status to target dataset.

        " + }, + "message":{ + "shape":"String", + "documentation":"

        The failure message (if any) of failed event load execution to dataset.

        " + } + }, + "documentation":"

        The target dataset load execution details.

        " + }, + "DataIntegrationEventDatasetLoadStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "IN_PROGRESS", + "FAILED" + ] + }, + "DataIntegrationEventDatasetOperationType":{ + "type":"string", + "enum":[ + "APPEND", + "UPSERT", + "DELETE" + ] + }, + "DataIntegrationEventDatasetTargetConfiguration":{ + "type":"structure", + "required":[ + "datasetIdentifier", + "operationType" + ], + "members":{ + "datasetIdentifier":{ + "shape":"DataIntegrationDatasetArn", + "documentation":"

        The datalake dataset ARN identifier.

        " + }, + "operationType":{ + "shape":"DataIntegrationEventDatasetOperationType", + "documentation":"

        The target dataset load operation type.

        " + } + }, + "documentation":"

        The target dataset configuration for a DATASET event type.

        " + }, + "DataIntegrationEventDatasetTargetDetails":{ + "type":"structure", + "required":[ + "datasetIdentifier", + "operationType", + "datasetLoadExecution" + ], + "members":{ + "datasetIdentifier":{ + "shape":"DataIntegrationDatasetArn", + "documentation":"

        The datalake dataset ARN identifier.

        " + }, + "operationType":{ + "shape":"DataIntegrationEventDatasetOperationType", + "documentation":"

        The target dataset load operation type. The available options are:

        • APPEND - Add new records to the dataset. Noted that this operation type will just try to append records as-is without any primary key or partition constraints.

        • UPSERT - Modify existing records in the dataset with primary key configured, events for datasets without primary keys are not allowed. If event data contains primary keys that match records in the dataset within the same partition, then those existing records (in that partition) will be updated. If primary keys do not match, new records will be added. Note that if the dataset contains records with duplicate primary key values in the same partition, those duplicate records will be deduped into one updated record.

        • DELETE - Remove existing records in the dataset with primary key configured, events for datasets without primary keys are not allowed. If event data contains primary keys that match records in the dataset within the same partition, then those existing records (in that partition) will be deleted. If primary keys do not match, no actions will be done. Note that if the dataset contains records with duplicate primary key values in the same partition, all those duplicates will be removed.

        " + }, + "datasetLoadExecution":{ + "shape":"DataIntegrationEventDatasetLoadExecutionDetails", + "documentation":"

        The target dataset load execution.

        " + } + }, + "documentation":"

        The target dataset details for a DATASET event type.

        " + }, "DataIntegrationEventGroupId":{ "type":"string", "max":255, "min":1 }, + "DataIntegrationEventList":{ + "type":"list", + "member":{"shape":"DataIntegrationEvent"} + }, + "DataIntegrationEventMaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "DataIntegrationEventNextToken":{ + "type":"string", + "max":65535, + "min":1 + }, "DataIntegrationEventType":{ "type":"string", "enum":[ @@ -753,7 +1110,8 @@ "scn.data.shipment", "scn.data.shipmentstop", "scn.data.shipmentstoporder", - "scn.data.supplyplan" + "scn.data.supplyplan", + "scn.data.dataset" ] }, "DataIntegrationFlow":{ @@ -804,15 +1162,30 @@ "members":{ "loadType":{ "shape":"DataIntegrationFlowLoadType", - "documentation":"

        The dataset data load type in dataset options.

        " + "documentation":"

        The target dataset's data load type. This only affects how source S3 files are selected in the S3-to-dataset flow.

        • REPLACE - Target dataset will get replaced with the new file added under the source s3 prefix.

        • INCREMENTAL - Target dataset will get updated with the up-to-date content under S3 prefix incorporating any file additions or removals there.

        " }, "dedupeRecords":{ "shape":"Boolean", - "documentation":"

        The dataset load option to remove duplicates.

        " + "documentation":"

        The option to perform deduplication on data records sharing the same primary key values. If disabled, transformed data with duplicate primary key values will be ingested into the dataset; for datasets within the asc namespace, such duplicates will cause ingestion to fail. If enabled without dedupeStrategy, deduplication is done by retaining a random data record among those sharing the same primary key values. If enabled with dedupeStrategy, the deduplication is done following the strategy.

        Note that target dataset may have partition configured, when dedupe is enabled, it only dedupe against primary keys and retain only one record out of those duplicates regardless of its partition status.

        " + }, + "dedupeStrategy":{ + "shape":"DataIntegrationFlowDedupeStrategy", + "documentation":"

        The deduplication strategy to dedupe the data records sharing same primary key values of the target dataset. This strategy only applies to target dataset with primary keys and with dedupeRecords option enabled. If transformed data still got duplicates after the dedupeStrategy evaluation, a random data record is chosen to be retained.

        " } }, "documentation":"

        The dataset options used in dataset source and target configurations.

        " }, + "DataIntegrationFlowDatasetSource":{ + "type":"structure", + "required":["datasetIdentifier"], + "members":{ + "datasetIdentifier":{ + "shape":"DataIntegrationDatasetArn", + "documentation":"

        The ARN of the dataset source.

        " + } + }, + "documentation":"

        The details of a flow execution with dataset source.

        " + }, "DataIntegrationFlowDatasetSourceConfiguration":{ "type":"structure", "required":["datasetIdentifier"], @@ -843,6 +1216,176 @@ }, "documentation":"

        The dataset DataIntegrationFlow target configuration parameters.

        " }, + "DataIntegrationFlowDedupeStrategy":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"DataIntegrationFlowDedupeStrategyType", + "documentation":"

        The type of the deduplication strategy.

        • FIELD_PRIORITY - Field priority configuration for the deduplication strategy specifies an ordered list of fields used to tie-break the data records sharing the same primary key values. Fields earlier in the list have higher priority for evaluation. For each field, the sort order determines whether to retain data record with larger or smaller field value.

        " + }, + "fieldPriority":{ + "shape":"DataIntegrationFlowFieldPriorityDedupeStrategyConfiguration", + "documentation":"

        The field priority deduplication strategy.

        " + } + }, + "documentation":"

        The deduplication strategy details.

        " + }, + "DataIntegrationFlowDedupeStrategyType":{ + "type":"string", + "enum":["FIELD_PRIORITY"] + }, + "DataIntegrationFlowExecution":{ + "type":"structure", + "required":[ + "instanceId", + "flowName", + "executionId" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The flow execution's instanceId.

        " + }, + "flowName":{ + "shape":"DataIntegrationFlowName", + "documentation":"

        The flow execution's flowName.

        " + }, + "executionId":{ + "shape":"UUID", + "documentation":"

        The flow executionId.

        " + }, + "status":{ + "shape":"DataIntegrationFlowExecutionStatus", + "documentation":"

        The status of flow execution.

        " + }, + "sourceInfo":{ + "shape":"DataIntegrationFlowExecutionSourceInfo", + "documentation":"

        The source information for a flow execution.

        " + }, + "message":{ + "shape":"String", + "documentation":"

        The failure message (if any) of failed flow execution.

        " + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

        The flow execution start timestamp.

        " + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

        The flow execution end timestamp.

        " + }, + "outputMetadata":{ + "shape":"DataIntegrationFlowExecutionOutputMetadata", + "documentation":"

        The flow execution output metadata.

        " + } + }, + "documentation":"

        The flow execution details.

        " + }, + "DataIntegrationFlowExecutionDiagnosticReportsRootS3URI":{ + "type":"string", + "pattern":"s3://[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]/.{1,1024}" + }, + "DataIntegrationFlowExecutionList":{ + "type":"list", + "member":{"shape":"DataIntegrationFlowExecution"} + }, + "DataIntegrationFlowExecutionMaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "DataIntegrationFlowExecutionNextToken":{ + "type":"string", + "max":65535, + "min":1 + }, + "DataIntegrationFlowExecutionOutputMetadata":{ + "type":"structure", + "members":{ + "diagnosticReportsRootS3URI":{ + "shape":"DataIntegrationFlowExecutionDiagnosticReportsRootS3URI", + "documentation":"

        The S3 URI under which all diagnostic files (such as deduped records if any) are stored.

        " + } + }, + "documentation":"

        The output metadata of the flow execution.

        " + }, + "DataIntegrationFlowExecutionSourceInfo":{ + "type":"structure", + "required":["sourceType"], + "members":{ + "sourceType":{ + "shape":"DataIntegrationFlowSourceType", + "documentation":"

        The data integration flow execution source type.

        " + }, + "s3Source":{ + "shape":"DataIntegrationFlowS3Source", + "documentation":"

        The source details of a flow execution with S3 source.

        " + }, + "datasetSource":{ + "shape":"DataIntegrationFlowDatasetSource", + "documentation":"

        The source details of a flow execution with dataset source.

        " + } + }, + "documentation":"

        The source information of a flow execution.

        " + }, + "DataIntegrationFlowExecutionStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "IN_PROGRESS", + "FAILED" + ] + }, + "DataIntegrationFlowFieldPriorityDedupeField":{ + "type":"structure", + "required":[ + "name", + "sortOrder" + ], + "members":{ + "name":{ + "shape":"DataIntegrationFlowFieldPriorityDedupeFieldName", + "documentation":"

        The name of the deduplication field. Must exist in the dataset and not be a primary key.

        " + }, + "sortOrder":{ + "shape":"DataIntegrationFlowFieldPriorityDedupeSortOrder", + "documentation":"

        The sort order for the deduplication field.

        " + } + }, + "documentation":"

        The field used in the field priority deduplication strategy.

        " + }, + "DataIntegrationFlowFieldPriorityDedupeFieldList":{ + "type":"list", + "member":{"shape":"DataIntegrationFlowFieldPriorityDedupeField"}, + "max":10, + "min":1 + }, + "DataIntegrationFlowFieldPriorityDedupeFieldName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-z0-9_]+" + }, + "DataIntegrationFlowFieldPriorityDedupeSortOrder":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "DataIntegrationFlowFieldPriorityDedupeStrategyConfiguration":{ + "type":"structure", + "required":["fields"], + "members":{ + "fields":{ + "shape":"DataIntegrationFlowFieldPriorityDedupeFieldList", + "documentation":"

        The list of field names and their sort order for deduplication, arranged in descending priority from highest to lowest.

        " + } + }, + "documentation":"

        The field priority deduplication strategy details.

        " + }, "DataIntegrationFlowFileType":{ "type":"string", "enum":[ @@ -895,6 +1438,24 @@ "min":0, "pattern":"[/A-Za-z0-9._-]+" }, + "DataIntegrationFlowS3Source":{ + "type":"structure", + "required":[ + "bucketName", + "key" + ], + "members":{ + "bucketName":{ + "shape":"S3BucketName", + "documentation":"

        The S3 bucket name of the S3 source.

        " + }, + "key":{ + "shape":"DataIntegrationS3ObjectKey", + "documentation":"

        The S3 object key of the S3 source.

        " + } + }, + "documentation":"

        The details of a flow execution with S3 source.

        " + }, "DataIntegrationFlowS3SourceConfiguration":{ "type":"structure", "required":[ @@ -908,7 +1469,7 @@ }, "prefix":{ "shape":"DataIntegrationFlowS3Prefix", - "documentation":"

        The prefix of the S3 source objects.

        " + "documentation":"

        The prefix of the S3 source objects. To trigger data ingestion, S3 files need to be put under s3://bucketName/prefix/.

        " }, "options":{ "shape":"DataIntegrationFlowS3Options", @@ -942,7 +1503,8 @@ "DataIntegrationFlowSQLQuery":{ "type":"string", "max":65535, - "min":1 + "min":1, + "sensitive":true }, "DataIntegrationFlowSQLTransformationConfiguration":{ "type":"structure", @@ -1014,7 +1576,7 @@ }, "datasetTarget":{ "shape":"DataIntegrationFlowDatasetTargetConfiguration", - "documentation":"

        The dataset DataIntegrationFlow target.

        " + "documentation":"

        The dataset DataIntegrationFlow target. Note that for an AWS Supply Chain dataset under the asc namespace, it has a connection_id internal field that is not allowed to be provided by the client directly; it will be auto-populated.

        " } }, "documentation":"

        The DataIntegrationFlow target parameters.

        " @@ -1048,6 +1610,12 @@ "NONE" ] }, + "DataIntegrationS3ObjectKey":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[/A-Za-z0-9._:*()'!=?&+;@-]+" + }, "DataLakeDataset":{ "type":"structure", "required":[ @@ -1065,12 +1633,12 @@ "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        " }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset. The available values are:

        " + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        " }, "name":{ "shape":"DataLakeDatasetName", - "documentation":"

        The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        " + "documentation":"

        The name of the dataset. For asc namespace, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        " }, "arn":{ "shape":"AscResourceArn", @@ -1084,6 +1652,7 @@ "shape":"DataLakeDatasetDescription", "documentation":"

        The description of the dataset.

        " }, + "partitionSpec":{"shape":"DataLakeDatasetPartitionSpec"}, "createdTime":{ "shape":"Timestamp", "documentation":"

        The creation time of the dataset.

        " @@ -1104,7 +1673,7 @@ "type":"list", "member":{"shape":"DataLakeDataset"}, "max":20, - "min":1 + "min":0 }, "DataLakeDatasetMaxResults":{ "type":"integer", @@ -1118,17 +1687,84 @@ "min":1, "pattern":"[a-z0-9_]+" }, - "DataLakeDatasetNamespace":{ - "type":"string", - "max":50, - "min":1, - "pattern":"[a-z]+" - }, "DataLakeDatasetNextToken":{ "type":"string", "max":65535, "min":1 }, + "DataLakeDatasetPartitionField":{ + "type":"structure", + "required":[ + "name", + "transform" + ], + "members":{ + "name":{ + "shape":"DataLakeDatasetSchemaFieldName", + "documentation":"

        The name of the partition field.

        " + }, + "transform":{ + "shape":"DataLakeDatasetPartitionFieldTransform", + "documentation":"

        The transformation of the partition field. A transformation specifies how to partition on a given field. For example, with a timestamp field you can specify that you'd like to partition by day, e.g. a data record with the value 2025-01-03T00:00:00Z in the partition field falls in the 2025-01-03 partition. Also note that a data record without any value in an optional partition field falls in the NULL partition.

        " + } + }, + "documentation":"

        The detail of the partition field.

        " + }, + "DataLakeDatasetPartitionFieldList":{ + "type":"list", + "member":{"shape":"DataLakeDatasetPartitionField"}, + "max":10, + "min":1 + }, + "DataLakeDatasetPartitionFieldTransform":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"DataLakeDatasetPartitionTransformType", + "documentation":"

        The type of partitioning transformation for this field. The available options are:

        • IDENTITY - Partitions data on a given field by its exact values.

        • YEAR - Partitions data on a timestamp field using year granularity.

        • MONTH - Partitions data on a timestamp field using month granularity.

        • DAY - Partitions data on a timestamp field using day granularity.

        • HOUR - Partitions data on a timestamp field using hour granularity.

        " + } + }, + "documentation":"

        The detail of the partition field transformation.

        " + }, + "DataLakeDatasetPartitionSpec":{ + "type":"structure", + "required":["fields"], + "members":{ + "fields":{ + "shape":"DataLakeDatasetPartitionFieldList", + "documentation":"

        The fields on which to partition a dataset. The partitions will be applied hierarchically based on the order of this list.

        " + } + }, + "documentation":"

        The partition specification for a dataset.

        " + }, + "DataLakeDatasetPartitionTransformType":{ + "type":"string", + "enum":[ + "YEAR", + "MONTH", + "DAY", + "HOUR", + "IDENTITY" + ] + }, + "DataLakeDatasetPrimaryKeyField":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"DataLakeDatasetSchemaFieldName", + "documentation":"

        The name of the primary key field.

        " + } + }, + "documentation":"

        The detail of the primary key field.

        " + }, + "DataLakeDatasetPrimaryKeyFieldList":{ + "type":"list", + "member":{"shape":"DataLakeDatasetPrimaryKeyField"}, + "max":20, + "min":1 + }, "DataLakeDatasetSchema":{ "type":"structure", "required":[ @@ -1143,9 +1779,13 @@ "fields":{ "shape":"DataLakeDatasetSchemaFieldList", "documentation":"

        The list of field details of the dataset schema.

        " + }, + "primaryKeys":{ + "shape":"DataLakeDatasetPrimaryKeyFieldList", + "documentation":"

        The list of primary key fields for the dataset. Primary keys defined can help data ingestion methods to ensure data uniqueness: CreateDataIntegrationFlow's dedupe strategy will leverage primary keys to perform records deduplication before write to dataset; SendDataIntegrationEvent's UPSERT and DELETE can only work with dataset with primary keys. For more details, refer to those data ingestion documentations.

        Note that defining primary keys does not necessarily mean the dataset cannot have duplicate records: duplicate records can still be ingested if CreateDataIntegrationFlow's dedupe is disabled, or through SendDataIntegrationEvent's APPEND operation.

        " } }, - "documentation":"

        The schema details of the dataset.

        " + "documentation":"

        The schema details of the dataset. Note that for AWS Supply Chain dataset under asc namespace, it may have internal fields like connection_id that will be auto populated by data ingestion methods.

        " }, "DataLakeDatasetSchemaField":{ "type":"structure", @@ -1168,34 +1808,100 @@ "documentation":"

        Indicate if the field is required or not.

        " } }, - "documentation":"

        The dataset field details.

        " + "documentation":"

        The dataset field details.

        " + }, + "DataLakeDatasetSchemaFieldList":{ + "type":"list", + "member":{"shape":"DataLakeDatasetSchemaField"}, + "max":500, + "min":1 + }, + "DataLakeDatasetSchemaFieldName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-z0-9_]+" + }, + "DataLakeDatasetSchemaFieldType":{ + "type":"string", + "enum":[ + "INT", + "DOUBLE", + "STRING", + "TIMESTAMP", + "LONG" + ] + }, + "DataLakeDatasetSchemaName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[A-Za-z0-9]+" + }, + "DataLakeNamespace":{ + "type":"structure", + "required":[ + "instanceId", + "name", + "arn", + "createdTime", + "lastModifiedTime" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        " + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of the namespace.

        " + }, + "arn":{ + "shape":"AscResourceArn", + "documentation":"

        The arn of the namespace.

        " + }, + "description":{ + "shape":"DataLakeNamespaceDescription", + "documentation":"

        The description of the namespace.

        " + }, + "createdTime":{ + "shape":"Timestamp", + "documentation":"

        The creation time of the namespace.

        " + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

        The last modified time of the namespace.

        " + } + }, + "documentation":"

        The data lake namespace details.

        " }, - "DataLakeDatasetSchemaFieldList":{ - "type":"list", - "member":{"shape":"DataLakeDatasetSchemaField"}, + "DataLakeNamespaceDescription":{ + "type":"string", "max":500, "min":1 }, - "DataLakeDatasetSchemaFieldName":{ + "DataLakeNamespaceList":{ + "type":"list", + "member":{"shape":"DataLakeNamespace"}, + "max":20, + "min":1 + }, + "DataLakeNamespaceMaxResults":{ + "type":"integer", + "box":true, + "max":20, + "min":1 + }, + "DataLakeNamespaceName":{ "type":"string", - "max":100, + "max":50, "min":1, "pattern":"[a-z0-9_]+" }, - "DataLakeDatasetSchemaFieldType":{ - "type":"string", - "enum":[ - "INT", - "DOUBLE", - "STRING", - "TIMESTAMP" - ] - }, - "DataLakeDatasetSchemaName":{ + "DataLakeNamespaceNextToken":{ "type":"string", - "max":100, - "min":1, - "pattern":"[A-Za-z0-9]+" + "max":65535, + "min":1 }, "DatasetIdentifier":{ "type":"string", @@ -1258,14 +1964,14 @@ "locationName":"instanceId" }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset. The available values are:

        ", + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        ", "location":"uri", "locationName":"namespace" }, "name":{ "shape":"DataLakeDatasetName", - "documentation":"

        The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", + "documentation":"

        The name of the dataset. For asc namespace, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", "location":"uri", "locationName":"name" } @@ -1285,8 +1991,8 @@ "documentation":"

        The AWS Supply Chain instance identifier.

        " }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of deleted dataset.

        " + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of deleted dataset.

        " }, "name":{ "shape":"DataLakeDatasetName", @@ -1295,6 +2001,46 @@ }, "documentation":"

        The response parameters of DeleteDataLakeDataset.

        " }, + "DeleteDataLakeNamespaceRequest":{ + "type":"structure", + "required":[ + "instanceId", + "name" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The AWS Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of the namespace. Note that you cannot delete pre-defined namespaces such as asc and default; these are only deleted through instance deletion.

        ", + "location":"uri", + "locationName":"name" + } + }, + "documentation":"

        The request parameters of DeleteDataLakeNamespace.

        " + }, + "DeleteDataLakeNamespaceResponse":{ + "type":"structure", + "required":[ + "instanceId", + "name" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The AWS Supply Chain instance identifier.

        " + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of deleted namespace.

        " + } + }, + "documentation":"

        The response parameters of DeleteDataLakeNamespace.

        " + }, "DeleteInstanceRequest":{ "type":"structure", "required":["instanceId"], @@ -1356,6 +2102,79 @@ }, "documentation":"

        The response parameters for GetBillOfMaterialsImportJob.

        " }, + "GetDataIntegrationEventRequest":{ + "type":"structure", + "required":[ + "instanceId", + "eventId" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "eventId":{ + "shape":"UUID", + "documentation":"

        The unique event identifier.

        ", + "location":"uri", + "locationName":"eventId" + } + }, + "documentation":"

        The request parameters for GetDataIntegrationEvent.

        " + }, + "GetDataIntegrationEventResponse":{ + "type":"structure", + "required":["event"], + "members":{ + "event":{ + "shape":"DataIntegrationEvent", + "documentation":"

        The details of the DataIntegrationEvent returned.

        " + } + }, + "documentation":"

        The response parameters for GetDataIntegrationEvent.

        " + }, + "GetDataIntegrationFlowExecutionRequest":{ + "type":"structure", + "required":[ + "instanceId", + "flowName", + "executionId" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The AWS Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "flowName":{ + "shape":"DataIntegrationFlowName", + "documentation":"

        The flow name.

        ", + "location":"uri", + "locationName":"flowName" + }, + "executionId":{ + "shape":"UUID", + "documentation":"

        The flow execution identifier.

        ", + "location":"uri", + "locationName":"executionId" + } + }, + "documentation":"

        The request parameters of GetFlowExecution.

        " + }, + "GetDataIntegrationFlowExecutionResponse":{ + "type":"structure", + "required":["flowExecution"], + "members":{ + "flowExecution":{ + "shape":"DataIntegrationFlowExecution", + "documentation":"

        The flow execution details.

        " + } + }, + "documentation":"

        The response parameters of GetFlowExecution.

        " + }, "GetDataIntegrationFlowRequest":{ "type":"structure", "required":[ @@ -1404,14 +2223,14 @@ "locationName":"instanceId" }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset. The available values are:

        ", + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        ", "location":"uri", "locationName":"namespace" }, "name":{ "shape":"DataLakeDatasetName", - "documentation":"

        The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", + "documentation":"

        The name of the dataset. For asc namespace, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", "location":"uri", "locationName":"name" } @@ -1427,7 +2246,40 @@ "documentation":"

        The fetched dataset details.

        " } }, - "documentation":"

        The response parameters for UpdateDataLakeDataset.

        " + "documentation":"

        The response parameters for GetDataLakeDataset.

        " + }, + "GetDataLakeNamespaceRequest":{ + "type":"structure", + "required":[ + "instanceId", + "name" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of the namespace. Besides the namespaces user created, you can also specify the pre-defined namespaces:

        ", + "location":"uri", + "locationName":"name" + } + }, + "documentation":"

        The request parameters for GetDataLakeNamespace.

        " + }, + "GetDataLakeNamespaceResponse":{ + "type":"structure", + "required":["namespace"], + "members":{ + "namespace":{ + "shape":"DataLakeNamespace", + "documentation":"

        The fetched namespace details.

        " + } + }, + "documentation":"

        The response parameters for GetDataLakeNamespace.

        " }, "GetInstanceRequest":{ "type":"structure", @@ -1579,6 +2431,101 @@ "min":0, "pattern":"arn:[a-z0-9][-.a-z0-9]{0,62}:kms:([a-z0-9][-.a-z0-9]{0,62})?:([a-z0-9][-.a-z0-9]{0,62})?:key/.{0,1019}" }, + "ListDataIntegrationEventsRequest":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "eventType":{ + "shape":"DataIntegrationEventType", + "documentation":"

        List data integration events for the specified eventType.

        ", + "location":"querystring", + "locationName":"eventType" + }, + "nextToken":{ + "shape":"DataIntegrationEventNextToken", + "documentation":"

        The pagination token to fetch the next page of the data integration events.

        ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"DataIntegrationEventMaxResults", + "documentation":"

        Specify the maximum number of data integration events to fetch in one paginated request.

        ", + "location":"querystring", + "locationName":"maxResults" + } + }, + "documentation":"

        The request parameters for ListDataIntegrationEvents.

        " + }, + "ListDataIntegrationEventsResponse":{ + "type":"structure", + "required":["events"], + "members":{ + "events":{ + "shape":"DataIntegrationEventList", + "documentation":"

        The list of data integration events.

        " + }, + "nextToken":{ + "shape":"DataIntegrationEventNextToken", + "documentation":"

        The pagination token to fetch the next page of the ListDataIntegrationEvents.

        " + } + }, + "documentation":"

        The response parameters for ListDataIntegrationEvents.

        " + }, + "ListDataIntegrationFlowExecutionsRequest":{ + "type":"structure", + "required":[ + "instanceId", + "flowName" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The AWS Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "flowName":{ + "shape":"DataIntegrationFlowName", + "documentation":"

        The flow name.

        ", + "location":"uri", + "locationName":"flowName" + }, + "nextToken":{ + "shape":"DataIntegrationFlowExecutionNextToken", + "documentation":"

        The pagination token to fetch next page of flow executions.

        ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"DataIntegrationFlowExecutionMaxResults", + "documentation":"

        The number to specify the max number of flow executions to fetch in this paginated request.

        ", + "location":"querystring", + "locationName":"maxResults" + } + }, + "documentation":"

        The request parameters of ListFlowExecutions.

        " + }, + "ListDataIntegrationFlowExecutionsResponse":{ + "type":"structure", + "required":["flowExecutions"], + "members":{ + "flowExecutions":{ + "shape":"DataIntegrationFlowExecutionList", + "documentation":"

        The list of flow executions.

        " + }, + "nextToken":{ + "shape":"DataIntegrationFlowExecutionNextToken", + "documentation":"

        The pagination token to fetch next page of flow executions.

        " + } + }, + "documentation":"

        The response parameters of ListFlowExecutions.

        " + }, "ListDataIntegrationFlowsRequest":{ "type":"structure", "required":["instanceId"], @@ -1633,8 +2580,8 @@ "locationName":"instanceId" }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset. The available values are:

        ", + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        ", "location":"uri", "locationName":"namespace" }, @@ -1668,6 +2615,46 @@ }, "documentation":"

        The response parameters of ListDataLakeDatasets.

        " }, + "ListDataLakeNamespacesRequest":{ + "type":"structure", + "required":["instanceId"], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "nextToken":{ + "shape":"DataLakeNamespaceNextToken", + "documentation":"

        The pagination token to fetch next page of namespaces.

        ", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"DataLakeNamespaceMaxResults", + "documentation":"

        The max number of namespaces to fetch in this paginated request.

        ", + "location":"querystring", + "locationName":"maxResults" + } + }, + "documentation":"

        The request parameters of ListDataLakeNamespaces.

        " + }, + "ListDataLakeNamespacesResponse":{ + "type":"structure", + "required":["namespaces"], + "members":{ + "namespaces":{ + "shape":"DataLakeNamespaceList", + "documentation":"

        The list of fetched namespace details. Note that it only contains custom namespaces; pre-defined namespaces are not included.

        " + }, + "nextToken":{ + "shape":"DataLakeNamespaceNextToken", + "documentation":"

        The pagination token to fetch next page of namespaces.

        " + } + }, + "documentation":"

        The response parameters of ListDataLakeNamespaces.

        " + }, "ListInstancesRequest":{ "type":"structure", "members":{ @@ -1772,24 +2759,28 @@ }, "eventType":{ "shape":"DataIntegrationEventType", - "documentation":"

        The data event type.

        " + "documentation":"

        The data event type.

        " }, "data":{ "shape":"DataIntegrationEventData", - "documentation":"

        The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain.

        " + "documentation":"

        The data payload of the event, should follow the data schema of the target dataset, or see Data entities supported in AWS Supply Chain. To send single data record, use JsonObject format; to send multiple data records, use JsonArray format.

        Note that for an AWS Supply Chain dataset under the asc namespace, it has a connection_id internal field that is not allowed to be provided by the client directly; it will be auto-populated.

        " }, "eventGroupId":{ "shape":"DataIntegrationEventGroupId", - "documentation":"

        Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning.

        " + "documentation":"

        Event identifier (for example, orderId for InboundOrder) used for data sharding or partitioning. Note that under one eventGroupId with the same eventType and instanceId, events are processed sequentially in the order they are received by the server.

        " }, "eventTimestamp":{ "shape":"SyntheticTimestamp_epoch_seconds", - "documentation":"

        The event timestamp (in epoch seconds).

        " + "documentation":"

        The timestamp (in epoch seconds) associated with the event. If not provided, it will be assigned with current timestamp.

        " }, "clientToken":{ "shape":"ClientToken", - "documentation":"

        The idempotent client token.

        ", + "documentation":"

        The idempotent client token. The token is active for 8 hours, and within its lifetime, it ensures the request completes only once upon retry with same client token. If omitted, the AWS SDK generates a unique value so that AWS SDK can safely retry the request upon network errors.

        ", "idempotencyToken":true + }, + "datasetTarget":{ + "shape":"DataIntegrationEventDatasetTargetConfiguration", + "documentation":"

        The target dataset configuration for scn.data.dataset event type.

        " } }, "documentation":"

        The request parameters for SendDataIntegrationEvent.

        " @@ -1979,14 +2970,14 @@ "locationName":"instanceId" }, "namespace":{ - "shape":"DataLakeDatasetNamespace", - "documentation":"

        The name space of the dataset. The available values are:

        ", + "shape":"DataLakeNamespaceName", + "documentation":"

        The namespace of the dataset, besides the custom defined namespace, every instance comes with below pre-defined namespaces:

        ", "location":"uri", "locationName":"namespace" }, "name":{ "shape":"DataLakeDatasetName", - "documentation":"

        The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", + "documentation":"

        The name of the dataset. For asc namespace, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

        ", "location":"uri", "locationName":"name" }, @@ -2008,6 +2999,43 @@ }, "documentation":"

        The response parameters of UpdateDataLakeDataset.

        " }, + "UpdateDataLakeNamespaceRequest":{ + "type":"structure", + "required":[ + "instanceId", + "name" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

        The Amazon Web Services Supply Chain instance identifier.

        ", + "location":"uri", + "locationName":"instanceId" + }, + "name":{ + "shape":"DataLakeNamespaceName", + "documentation":"

        The name of the namespace. Note that you cannot update a namespace whose name starts with asc, default, scn, aws, amazon, or amzn.

        ", + "location":"uri", + "locationName":"name" + }, + "description":{ + "shape":"DataLakeNamespaceDescription", + "documentation":"

        The updated description of the data lake namespace.

        " + } + }, + "documentation":"

        The request parameters of UpdateDataLakeNamespace.

        " + }, + "UpdateDataLakeNamespaceResponse":{ + "type":"structure", + "required":["namespace"], + "members":{ + "namespace":{ + "shape":"DataLakeNamespace", + "documentation":"

        The updated namespace details.

        " + } + }, + "documentation":"

        The response parameters of UpdateDataLakeNamespace.

        " + }, "UpdateInstanceRequest":{ "type":"structure", "required":["instanceId"], diff --git a/services/support/pom.xml b/services/support/pom.xml index 1f3d3c3d2bdb..9ca6af67c7c1 100644 --- a/services/support/pom.xml +++ b/services/support/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT support AWS Java SDK :: Services :: AWS Support diff --git a/services/support/src/main/resources/codegen-resources/customization.config b/services/support/src/main/resources/codegen-resources/customization.config index 3f0f10d31169..8376e307ce94 100644 --- a/services/support/src/main/resources/codegen-resources/customization.config +++ b/services/support/src/main/resources/codegen-resources/customization.config @@ -5,6 +5,5 @@ "describeCases", "describeServices" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/supportapp/pom.xml b/services/supportapp/pom.xml index 2f270d67bd09..0dcb67d8a7fa 100644 --- a/services/supportapp/pom.xml +++ b/services/supportapp/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT supportapp AWS Java SDK :: Services :: Support App diff --git a/services/supportapp/src/main/resources/codegen-resources/customization.config b/services/supportapp/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/supportapp/src/main/resources/codegen-resources/customization.config +++ b/services/supportapp/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/swf/pom.xml b/services/swf/pom.xml index 9c86806ed704..a09d34fb2d53 100644 --- a/services/swf/pom.xml +++ b/services/swf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT swf AWS Java SDK :: Services :: Amazon SWF diff --git a/services/swf/src/main/resources/codegen-resources/customization.config b/services/swf/src/main/resources/codegen-resources/customization.config index 7c8ea67a8943..704ac68ddeb8 100644 --- a/services/swf/src/main/resources/codegen-resources/customization.config +++ b/services/swf/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { "serviceSpecificHttpConfig": "software.amazon.awssdk.services.swf.internal.SwfHttpConfigurationOptions", - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/synthetics/pom.xml b/services/synthetics/pom.xml index 17d0bc04c530..758984efdec3 100644 --- a/services/synthetics/pom.xml +++ b/services/synthetics/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT synthetics AWS Java SDK :: Services :: Synthetics diff --git a/services/synthetics/src/main/resources/codegen-resources/customization.config b/services/synthetics/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/synthetics/src/main/resources/codegen-resources/customization.config +++ b/services/synthetics/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/synthetics/src/main/resources/codegen-resources/service-2.json b/services/synthetics/src/main/resources/codegen-resources/service-2.json index 06349912c2c9..06779ae8d16d 100644 --- a/services/synthetics/src/main/resources/codegen-resources/service-2.json +++ b/services/synthetics/src/main/resources/codegen-resources/service-2.json @@ -551,26 +551,26 @@ "members":{ "S3Bucket":{ "shape":"String", - "documentation":"

        If your canary script is located in S3, specify the bucket name here. Do not include s3:// as the start of the bucket name.

        " + "documentation":"

        If your canary script is located in Amazon S3, specify the bucket name here. Do not include s3:// as the start of the bucket name.

        " }, "S3Key":{ "shape":"String", - "documentation":"

        The S3 key of your script. For more information, see Working with Amazon S3 Objects.

        " + "documentation":"

        The Amazon S3 key of your script. For more information, see Working with Amazon S3 Objects.

        " }, "S3Version":{ "shape":"String", - "documentation":"

        The S3 version ID of your script.

        " + "documentation":"

        The Amazon S3 version ID of your script.

        " }, "ZipFile":{ "shape":"Blob", - "documentation":"

        If you input your canary script directly into the canary instead of referring to an S3 location, the value of this parameter is the base64-encoded contents of the .zip file that contains the script. It must be smaller than 225 Kb.

        For large canary scripts, we recommend that you use an S3 location instead of inputting it directly with this parameter.

        " + "documentation":"

        If you input your canary script directly into the canary instead of referring to an Amazon S3 location, the value of this parameter is the base64-encoded contents of the .zip file that contains the script. It must be smaller than 225 Kb.

        For large canary scripts, we recommend that you use an Amazon S3 location instead of inputting it directly with this parameter.

        " }, "Handler":{ "shape":"CodeHandler", "documentation":"

        The entry point to use for the source code when running the canary. For canaries that use the syn-python-selenium-1.0 runtime or a syn-nodejs.puppeteer runtime earlier than syn-nodejs.puppeteer-3.4, the handler must be specified as fileName.handler. For syn-python-selenium-1.1, syn-nodejs.puppeteer-3.4, and later runtimes, the handler can be specified as fileName.functionName , or you can specify a folder where canary scripts reside as folder/fileName.functionName .

        " } }, - "documentation":"

        Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of Zipfile.

        If you are uploading your canary scripts with an Amazon S3 bucket, your zip file should include your script in a certain folder structure.

        " + "documentation":"

        Use this structure to input your script code for the canary. This structure contains the Lambda handler with the location where the canary should start running the script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included. If the script was passed into the canary directly, the script code is contained in the value of Zipfile.

        If you are uploading your canary scripts with an Amazon S3 bucket, your zip file should include your script in a certain folder structure.

        " }, "CanaryCodeOutput":{ "type":"structure", @@ -623,6 +623,14 @@ "shape":"UUID", "documentation":"

        A unique ID that identifies this canary run.

        " }, + "ScheduledRunId":{ + "shape":"UUID", + "documentation":"

        The ID of the scheduled canary run.

        " + }, + "RetryAttempt":{ + "shape":"RetryAttempt", + "documentation":"

        The number of this retry attempt.

        " + }, "Name":{ "shape":"CanaryName", "documentation":"

        The name of the canary.

        " @@ -664,6 +672,10 @@ "EnvironmentVariables":{ "shape":"EnvironmentVariablesMap", "documentation":"

        Specifies the keys and values to use for any environment variables used in the canary script. Use the following format:

        { \"key1\" : \"value1\", \"key2\" : \"value2\", ...}

        Keys must start with a letter and be at least two characters. The total size of your environment variables cannot exceed 4 KB. You can't specify any Lambda reserved environment variables as the keys for your environment variables. For more information about reserved keys, see Runtime environment variables.

        Environment variable keys and values are encrypted at rest using Amazon Web Services owned KMS keys. However, the environment variables are not encrypted on the client side. Do not store sensitive information in them.

        " + }, + "EphemeralStorage":{ + "shape":"EphemeralStorageSize", + "documentation":"

        Specifies the amount of ephemeral storage (in MB) to allocate for the canary run during execution. This temporary storage is used for storing canary run artifacts (which are uploaded to an Amazon S3 bucket at the end of the run), and any canary browser operations. This temporary storage is cleared after the run is completed. Default storage value is 1024 MB.

        " } }, "documentation":"

        A structure that contains input information for a canary run.

        " @@ -682,6 +694,10 @@ "ActiveTracing":{ "shape":"NullableBoolean", "documentation":"

        Displays whether this canary run used active X-Ray tracing.

        " + }, + "EphemeralStorage":{ + "shape":"EphemeralStorageSize", + "documentation":"

        Specifies the amount of ephemeral storage (in MB) to allocate for the canary run during execution. This temporary storage is used for storing canary run artifacts (which are uploaded to an Amazon S3 bucket at the end of the run), and any canary browser operations. This temporary storage is cleared after the run is completed. Default storage value is 1024 MB.

        " } }, "documentation":"

        A structure that contains information about a canary run.

        " @@ -714,11 +730,23 @@ }, "StateReasonCode":{ "shape":"CanaryRunStateReasonCode", - "documentation":"

        If this value is CANARY_FAILURE, an exception occurred in the canary code. If this value is EXECUTION_FAILURE, an exception occurred in CloudWatch Synthetics.

        " + "documentation":"

        If this value is CANARY_FAILURE, either the canary script failed or Synthetics ran into a fatal error when running the canary. For example, a canary timeout misconfiguration setting can cause the canary to timeout before Synthetics can evaluate its status.

        If this value is EXECUTION_FAILURE, a non-critical failure occurred such as failing to save generated debug artifacts (for example, screenshots or har files).

        If both types of failures occurred, the CANARY_FAILURE takes precedence. To understand the exact error, use the StateReason API.

        " + }, + "TestResult":{ + "shape":"CanaryRunTestResult", + "documentation":"

        Specifies the status of the canary script for this run. When Synthetics tries to determine the status but fails, the result is marked as UNKNOWN. For the overall status of the canary run, see State.

        " } }, "documentation":"

        This structure contains the status information about a canary run.

        " }, + "CanaryRunTestResult":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "UNKNOWN" + ] + }, "CanaryRunTimeline":{ "type":"structure", "members":{ @@ -729,6 +757,10 @@ "Completed":{ "shape":"Timestamp", "documentation":"

        The end time of the run.

        " + }, + "MetricTimestampForRunAndRetries":{ + "shape":"Timestamp", + "documentation":"

        The time at which the metrics will be generated for this run or retries.

        " } }, "documentation":"

        This structure contains the start and end times of a single canary run.

        " @@ -748,6 +780,10 @@ "DurationInSeconds":{ "shape":"MaxOneYearInSeconds", "documentation":"

        How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value. If you specify 0, the canary continues making runs until you stop it. If you omit this field, the default of 0 is used.

        " + }, + "RetryConfig":{ + "shape":"RetryConfigInput", + "documentation":"

        A structure that contains the retry configuration for a canary.

        " } }, "documentation":"

        This structure specifies how often a canary is to make runs and the date and time when it should stop making runs.

        " @@ -762,6 +798,10 @@ "DurationInSeconds":{ "shape":"MaxOneYearInSeconds", "documentation":"

        How long, in seconds, for the canary to continue making regular runs after it was created. The runs are performed according to the schedule in the Expression value.

        " + }, + "RetryConfig":{ + "shape":"RetryConfigOutput", + "documentation":"

        A structure that contains the retry configuration for a canary.

        " } }, "documentation":"

        How long, in seconds, for the canary to continue making regular runs according to the schedule in the Expression value.

        " @@ -806,11 +846,11 @@ }, "StateReason":{ "shape":"String", - "documentation":"

        If the canary has insufficient permissions to run, this field provides more details.

        " + "documentation":"

        If the canary creation or update failed, this field provides details on the failure.

        " }, "StateReasonCode":{ "shape":"CanaryStateReasonCode", - "documentation":"

        If the canary cannot run or has failed, this field displays the reason.

        " + "documentation":"

        If the canary creation or update failed, this field displays the reason code.

        " } }, "documentation":"

        A structure that contains the current state of the canary.

        " @@ -841,7 +881,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^([0-9a-zA-Z_-]+\\/)*[0-9A-Za-z_\\\\-]+\\.[A-Za-z_][A-Za-z0-9_]*$" + "pattern":"^([0-9a-zA-Z_-]+(\\/|\\.))*[0-9A-Za-z_\\\\-]+(\\.|::)[A-Za-z_][A-Za-z0-9_]*$" }, "ConflictException":{ "type":"structure", @@ -869,11 +909,11 @@ }, "Code":{ "shape":"CanaryCodeInput", - "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

        " + "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included.

        " }, "ArtifactS3Location":{ "shape":"String", - "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

        " + "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the Amazon S3 bucket can't include a period (.).

        " }, "ExecutionRoleArn":{ "shape":"RoleArn", @@ -1143,6 +1183,11 @@ "key":{"shape":"EnvironmentVariableName"}, "value":{"shape":"EnvironmentVariableValue"} }, + "EphemeralStorageSize":{ + "type":"integer", + "max":5120, + "min":1024 + }, "ErrorMessage":{"type":"string"}, "FunctionArn":{ "type":"string", @@ -1189,7 +1234,7 @@ }, "NextToken":{ "shape":"Token", - "documentation":"

        A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

        " + "documentation":"

        A token that indicates that there is more data available. You can use this token in a subsequent GetCanaryRuns operation to retrieve the next set of results.

        When auto retry is enabled for the canary, the first subsequent retry is suffixed with *1, indicating it's the first retry, and the next subsequent retry is suffixed with *2.

        " }, "MaxResults":{ "shape":"MaxSize100", @@ -1464,6 +1509,11 @@ "max":31622400, "min":0 }, + "MaxRetries":{ + "type":"integer", + "max":2, + "min":0 + }, "MaxSize100":{ "type":"integer", "max":100, @@ -1536,6 +1586,32 @@ "type":"string", "enum":["lambda-function"] }, + "RetryAttempt":{ + "type":"integer", + "max":2, + "min":1 + }, + "RetryConfigInput":{ + "type":"structure", + "required":["MaxRetries"], + "members":{ + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

        The maximum number of retries. The value must be less than or equal to 2.

        " + } + }, + "documentation":"

        This structure contains information about the canary's retry configuration.

        The default account level concurrent execution limit from Lambda is 1000. When you have more than 1000 canaries, it's possible there are more than 1000 Lambda invocations due to retries and the console might hang. For more information on the Lambda execution limit, see Understanding Lambda function scaling.

        For a canary with MaxRetries = 2, you need to set the CanaryRunConfigInput.TimeoutInSeconds to less than 600 seconds to avoid validation errors.

        " + }, + "RetryConfigOutput":{ + "type":"structure", + "members":{ + "MaxRetries":{ + "shape":"MaxRetries", + "documentation":"

        The maximum number of retries. The value must be less than or equal to 2.

        " + } + }, + "documentation":"

        This structure contains information about the canary's retry configuration.

        " + }, "RoleArn":{ "type":"string", "max":2048, @@ -1628,11 +1704,11 @@ }, "SuccessRetentionPeriodInDays":{ "shape":"MaxSize1024", - "documentation":"

        The number of days to retain data on the failed runs for this canary. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " + "documentation":"

        The number of days to retain data about successful runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " }, "FailureRetentionPeriodInDays":{ "shape":"MaxSize1024", - "documentation":"

        The number of days to retain data on the failed runs for this canary. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " + "documentation":"

        The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.

        This setting affects the range of information returned by GetCanaryRuns, as well as the range of information displayed in the Synthetics console.

        " }, "VisualReference":{"shape":"VisualReferenceInput"}, "ArtifactS3Location":{ @@ -1642,7 +1718,7 @@ "ArtifactConfig":{"shape":"ArtifactConfigInput"}, "ProvisionedResourceCleanup":{ "shape":"ProvisionedResourceCleanupSetting", - "documentation":"

        Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If the value of this parameter is AUTOMATIC, it means that the Lambda functions and layers will be deleted when the canary is deleted.

        If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted.

        " + "documentation":"

        Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If you omit this parameter, the default of AUTOMATIC is used, which means that the Lambda functions and layers will be deleted when the canary is deleted.

        If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted.

        " } } }, @@ -1805,7 +1881,7 @@ }, "Code":{ "shape":"CanaryCodeInput", - "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an S3 bucket, the bucket name, key, and version are also included.

        " + "documentation":"

        A structure that includes the entry point from which the canary should start running your script. If the script is stored in an Amazon S3 bucket, the bucket name, key, and version are also included.

        " }, "ExecutionRoleArn":{ "shape":"RoleArn", @@ -1841,7 +1917,7 @@ }, "ArtifactS3Location":{ "shape":"String", - "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

        " + "documentation":"

        The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the Amazon S3 bucket can't include a period (.).

        " }, "ArtifactConfig":{ "shape":"ArtifactConfigInput", diff --git a/services/taxsettings/pom.xml b/services/taxsettings/pom.xml index b5e110b5830d..3fbc8c246f9f 100644 --- a/services/taxsettings/pom.xml +++ b/services/taxsettings/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT taxsettings AWS Java SDK :: Services :: Tax Settings diff --git a/services/taxsettings/src/main/resources/codegen-resources/customization.config b/services/taxsettings/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/taxsettings/src/main/resources/codegen-resources/customization.config +++ b/services/taxsettings/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/textract/pom.xml b/services/textract/pom.xml index 6a67dacfb9f6..d42ac2087ee0 100644 --- a/services/textract/pom.xml +++ b/services/textract/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT textract AWS Java SDK :: Services :: Textract diff --git a/services/textract/src/main/resources/codegen-resources/customization.config b/services/textract/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/textract/src/main/resources/codegen-resources/customization.config +++ b/services/textract/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/textract/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/textract/src/main/resources/codegen-resources/endpoint-rule-set.json index 5af848c1a2ea..c696c545db6c 100644 --- a/services/textract/src/main/resources/codegen-resources/endpoint-rule-set.json +++ 
b/services/textract/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/services/textract/src/main/resources/codegen-resources/service-2.json b/services/textract/src/main/resources/codegen-resources/service-2.json index 
610b0c475699..25a818647866 100644 --- a/services/textract/src/main/resources/codegen-resources/service-2.json +++ b/services/textract/src/main/resources/codegen-resources/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"textract", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Textract", "serviceId":"Textract", "signatureVersion":"v4", "targetPrefix":"Textract", - "uid":"textract-2018-06-27" + "uid":"textract-2018-06-27", + "auth":["aws.auth#sigv4"] }, "operations":{ "AnalyzeDocument":{ @@ -428,7 +430,7 @@ {"shape":"ThrottlingException"}, {"shape":"LimitExceededException"} ], - "documentation":"

        Starts the asynchronous detection of text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.

        StartDocumentTextDetection can analyze text in documents that are in JPEG, PNG, TIFF, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.

        StartTextDetection returns a job identifier (JobId) that you use to get the results of the operation. When text detection is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.

        For more information, see Document Text Detection.

        " + "documentation":"

        Starts the asynchronous detection of text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.

        StartDocumentTextDetection can analyze text in documents that are in JPEG, PNG, TIFF, and PDF format. The documents are stored in an Amazon S3 bucket. Use DocumentLocation to specify the bucket name and file name of the document.

        StartDocumentTextDetection returns a job identifier (JobId) that you use to get the results of the operation. When text detection is finished, Amazon Textract publishes a completion status to the Amazon Simple Notification Service (Amazon SNS) topic that you specify in NotificationChannel. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetDocumentTextDetection, and pass the job identifier (JobId) from the initial call to StartDocumentTextDetection.

        For more information, see Document Text Detection.

        " }, "StartExpenseAnalysis":{ "name":"StartExpenseAnalysis", @@ -541,8 +543,7 @@ "shapes":{ "AccessDeniedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        You aren't authorized to perform the action. Use the Amazon Resource Name (ARN) of an authorized user or IAM role to perform the operation.

        ", "exception":true }, @@ -838,6 +839,7 @@ } } }, + "Angle":{"type":"float"}, "AutoUpdate":{ "type":"string", "enum":[ @@ -847,8 +849,7 @@ }, "BadDocumentException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Textract isn't able to read the document. For more information on the document limits in Amazon Textract, see limits.

        ", "exception":true }, @@ -857,7 +858,7 @@ "members":{ "BlockType":{ "shape":"BlockType", - "documentation":"

        The type of text item that's recognized. In operations for text detection, the following types are returned:

        • PAGE - Contains a list of the LINE Block objects that are detected on a document page.

        • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.

        • LINE - A string of tab-delimited, contiguous words that are detected on a document page.

        In text analysis operations, the following types are returned:

        • PAGE - Contains a list of child Block objects that are detected on a document page.

        • KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object.

        • WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.

        • LINE - A string of tab-delimited, contiguous words that are detected on a document page.

        • TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each.

        • TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table.

        • TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table.

        • CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell.

        • MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells.

        • SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element.

        • SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell.

        • QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer.

        • QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score.

        The following BlockTypes are only returned for Amazon Textract Layout.

        • LAYOUT_TITLE - The main title of the document.

        • LAYOUT_HEADER - Text located in the top margin of the document.

        • LAYOUT_FOOTER - Text located in the bottom margin of the document.

        • LAYOUT_SECTION_HEADER - The titles of sections within a document.

        • LAYOUT_PAGE_NUMBER - The page number of the documents.

        • LAYOUT_LIST - Any information grouped together in list form.

        • LAYOUT_FIGURE - Indicates the location of an image in a document.

        • LAYOUT_TABLE - Indicates the location of a table in the document.

        • LAYOUT_KEY_VALUE - Indicates the location of form key-values in a document.

        • LAYOUT_TEXT - Text that is present typically as a part of paragraphs in documents.

        " + "documentation":"

        The type of text item that's recognized. In operations for text detection, the following types are returned:

        • PAGE - Contains a list of the LINE Block objects that are detected on a document page.

        • WORD - A word detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.

        • LINE - A string of space-delimited, contiguous words that are detected on a document page.

        In text analysis operations, the following types are returned:

        • PAGE - Contains a list of child Block objects that are detected on a document page.

        • KEY_VALUE_SET - Stores the KEY and VALUE Block objects for linked text that's detected on a document page. Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or a VALUE Block object.

        • WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces.

        • LINE - A string of tab-delimited, contiguous words that are detected on a document page.

        • TABLE - A table that's detected on a document page. A table is grid-based information with two or more rows or columns, with a cell span of one row and one column each.

        • TABLE_TITLE - The title of a table. A title is typically a line of text above or below a table, or embedded as the first row of a table.

        • TABLE_FOOTER - The footer associated with a table. A footer is typically a line or lines of text below a table or embedded as the last row of a table.

        • CELL - A cell within a detected table. The cell is the parent of the block that contains the text in the cell.

        • MERGED_CELL - A cell in a table whose content spans more than one row or column. The Relationships array for this cell contain data from individual cells.

        • SELECTION_ELEMENT - A selection element such as an option button (radio button) or a check box that's detected on a document page. Use the value of SelectionStatus to determine the status of the selection element.

        • SIGNATURE - The location and confidence score of a signature detected on a document page. Can be returned as part of a Key-Value pair or a detected cell.

        • QUERY - A question asked during the call of AnalyzeDocument. Contains an alias and an ID that attaches it to its answer.

        • QUERY_RESULT - A response to a question asked during the call of analyze document. Comes with an alias and ID for ease of locating in a response. Also contains location and confidence score.

        The following BlockTypes are only returned for Amazon Textract Layout.

        • LAYOUT_TITLE - The main title of the document.

        • LAYOUT_HEADER - Text located in the top margin of the document.

        • LAYOUT_FOOTER - Text located in the bottom margin of the document.

        • LAYOUT_SECTION_HEADER - The titles of sections within a document.

        • LAYOUT_PAGE_NUMBER - The page number of the documents.

        • LAYOUT_LIST - Any information grouped together in list form.

        • LAYOUT_FIGURE - Indicates the location of an image in a document.

        • LAYOUT_TABLE - Indicates the location of a table in the document.

        • LAYOUT_KEY_VALUE - Indicates the location of form key-values in a document.

        • LAYOUT_TEXT - Text that is present typically as a part of paragraphs in documents.

        " }, "Confidence":{ "shape":"Percent", @@ -981,8 +982,7 @@ }, "ConflictException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Updating or deleting a resource can cause an inconsistent state.

        ", "exception":true }, @@ -1099,8 +1099,7 @@ }, "DeleteAdapterResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAdapterVersionRequest":{ "type":"structure", @@ -1121,8 +1120,7 @@ }, "DeleteAdapterVersionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DetectDocumentTextRequest":{ "type":"structure", @@ -1233,8 +1231,7 @@ }, "DocumentTooLargeException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The document can't be processed because it's too large. The maximum document size for synchronous operations is 10 MB. The maximum document size for asynchronous operations is 500 MB for PDF files.

        ", "exception":true }, @@ -1441,6 +1438,10 @@ "Polygon":{ "shape":"Polygon", "documentation":"

        Within the bounding box, a fine-grained polygon around the recognized item.

        " + }, + "RotationAngle":{ + "shape":"Angle", + "documentation":"

        Provides a numerical value corresponding to the rotation of the text.

        " } }, "documentation":"

        Information about where the following items are located on a document page: detected page, text, key-value pairs, tables, table cells, and selection elements.

        " @@ -1884,8 +1885,7 @@ }, "IdempotentParameterMismatchException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        A ClientRequestToken input parameter was reused with an operation, but at least one of the other input parameters is different from the previous call to the operation.

        ", "exception":true }, @@ -1930,37 +1930,32 @@ }, "InternalServerError":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Textract experienced a service issue. Try your call again.

        ", "exception":true, "fault":true }, "InvalidJobIdException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An invalid job identifier was passed to an asynchronous analysis operation.

        ", "exception":true }, "InvalidKMSKeyException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Indicates you do not have decrypt permissions with the KMS key entered, or the KMS key was entered incorrectly.

        ", "exception":true }, "InvalidParameterException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An input parameter violated a constraint. For example, in synchronous operations, an InvalidParameterException exception occurs when neither of the S3Object or Bytes values are supplied in the Document request parameter. Validate your parameter before calling the API operation again.

        ", "exception":true }, "InvalidS3ObjectException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Textract is unable to access the S3 object that's specified in the request. for more information, Configure Access to Amazon S3 For troubleshooting information, see Troubleshooting Amazon S3

        ", "exception":true }, @@ -2085,8 +2080,7 @@ }, "LimitExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        An Amazon Textract service limit was exceeded. For example, if you start too many asynchronous jobs concurrently, calls to start operations (StartDocumentTextDetection, for example) raise a LimitExceededException exception (HTTP status code: 400) until the number of concurrently running jobs is below the Amazon Textract service limit.

        ", "exception":true }, @@ -2343,8 +2337,7 @@ }, "ProvisionedThroughputExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Textract.

        ", "exception":true }, @@ -2434,8 +2427,7 @@ }, "ResourceNotFoundException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Returned when an operation tried to access a nonexistent resource.

        ", "exception":true }, @@ -2460,7 +2452,7 @@ }, "Name":{ "shape":"S3ObjectName", - "documentation":"

        The file name of the input document. Synchronous operations can use image files that are in JPEG or PNG format. Asynchronous operations also support PDF and TIFF format files.

        " + "documentation":"

        The file name of the input document. Image files may be in PDF, TIFF, JPEG, or PNG format.

        " }, "Version":{ "shape":"S3ObjectVersion", @@ -2496,8 +2488,7 @@ }, "ServiceQuotaExceededException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Returned when a request cannot be completed as it would exceed a maximum service quota.

        ", "exception":true }, @@ -2737,8 +2728,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2755,8 +2745,7 @@ }, "ThrottlingException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Amazon Textract is temporarily unable to process the request. Try your call again.

        ", "exception":true, "fault":true @@ -2785,8 +2774,7 @@ }, "UnsupportedDocumentException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The format of the input document isn't supported. Documents for operations can be in PNG, JPEG, PDF, or TIFF format.

        ", "exception":true }, @@ -2809,8 +2797,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateAdapterRequest":{ "type":"structure", @@ -2865,8 +2852,7 @@ }, "ValidationException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Indicates that a request was not valid. Check request for proper formatting.

        ", "exception":true }, diff --git a/services/timestreaminfluxdb/pom.xml b/services/timestreaminfluxdb/pom.xml index 1b72878d51fe..9ea6274edb4b 100644 --- a/services/timestreaminfluxdb/pom.xml +++ b/services/timestreaminfluxdb/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT timestreaminfluxdb AWS Java SDK :: Services :: Timestream Influx DB diff --git a/services/timestreaminfluxdb/src/main/resources/codegen-resources/customization.config b/services/timestreaminfluxdb/src/main/resources/codegen-resources/customization.config index 751610ceef5f..2c63c0851048 100644 --- a/services/timestreaminfluxdb/src/main/resources/codegen-resources/customization.config +++ b/services/timestreaminfluxdb/src/main/resources/codegen-resources/customization.config @@ -1,3 +1,2 @@ { - "enableFastUnmarshaller": true } diff --git a/services/timestreamquery/pom.xml b/services/timestreamquery/pom.xml index 52d82e2a5adb..fd07c1aac408 100644 --- a/services/timestreamquery/pom.xml +++ b/services/timestreamquery/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT timestreamquery AWS Java SDK :: Services :: Timestream Query diff --git a/services/timestreamwrite/pom.xml b/services/timestreamwrite/pom.xml index 960a448ffe8b..96ca13b625fe 100644 --- a/services/timestreamwrite/pom.xml +++ b/services/timestreamwrite/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT timestreamwrite AWS Java SDK :: Services :: Timestream Write diff --git a/services/timestreamwrite/src/main/resources/codegen-resources/customization.config b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config index 82211e2a0977..bca9c08d17ec 100644 --- a/services/timestreamwrite/src/main/resources/codegen-resources/customization.config +++ b/services/timestreamwrite/src/main/resources/codegen-resources/customization.config @@ -1,5 +1,4 @@ { 
"allowEndpointOverrideForEndpointDiscoveryRequiredOperations": true, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/tnb/pom.xml b/services/tnb/pom.xml index 6f1e8f32d917..1e61fb31d77a 100644 --- a/services/tnb/pom.xml +++ b/services/tnb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT tnb AWS Java SDK :: Services :: Tnb diff --git a/services/tnb/src/main/resources/codegen-resources/customization.config b/services/tnb/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/tnb/src/main/resources/codegen-resources/customization.config +++ b/services/tnb/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/transcribe/pom.xml b/services/transcribe/pom.xml index 00bdc96e73ad..cf9461a694b5 100644 --- a/services/transcribe/pom.xml +++ b/services/transcribe/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT transcribe AWS Java SDK :: Services :: Transcribe diff --git a/services/transcribe/src/main/resources/codegen-resources/customization.config b/services/transcribe/src/main/resources/codegen-resources/customization.config index 2dd5910b2d52..7d4a6e0d6c5b 100644 --- a/services/transcribe/src/main/resources/codegen-resources/customization.config +++ b/services/transcribe/src/main/resources/codegen-resources/customization.config @@ -3,6 +3,5 @@ "verifiedSimpleMethods" : [ "listTranscriptionJobs", "listVocabularies" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/transcribe/src/main/resources/codegen-resources/service-2.json b/services/transcribe/src/main/resources/codegen-resources/service-2.json index 6d85ea6d12b2..750ccfa46970 100644 --- 
a/services/transcribe/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribe/src/main/resources/codegen-resources/service-2.json @@ -1015,7 +1015,7 @@ "members":{ "NoteTemplate":{ "shape":"MedicalScribeNoteTemplate", - "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patients progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        " + "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patient's progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        • BIRP: Focuses on the patient's behavioral patterns and responses. Examples of sections include Behavior, Intervention, Response, and Plan.

        • SIRP: Emphasizes the situational context of therapy. Examples of sections include Situation, Intervention, Response, and Plan.

        • DAP: Provides a simplified format for clinical documentation. Examples of sections include Data, Assessment, and Plan.

        • BEHAVIORAL_SOAP: Behavioral health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        • PHYSICAL_SOAP: Physical health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        " } }, "documentation":"

        The output configuration for clinical note generation.

        " @@ -1318,8 +1318,7 @@ }, "DeleteCallAnalyticsCategoryResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteCallAnalyticsJobRequest":{ "type":"structure", @@ -1333,8 +1332,7 @@ }, "DeleteCallAnalyticsJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLanguageModelRequest":{ "type":"structure", @@ -1779,6 +1777,7 @@ "cs-CZ", "cy-WL", "el-GR", + "et-EE", "et-ET", "eu-ES", "fi-FI", @@ -2466,7 +2465,12 @@ "type":"string", "enum":[ "HISTORY_AND_PHYSICAL", - "GIRPP" + "GIRPP", + "BIRP", + "SIRP", + "DAP", + "BEHAVIORAL_SOAP", + "PHYSICAL_SOAP" ] }, "MedicalScribeOutput":{ @@ -3367,8 +3371,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -3675,8 +3678,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateCallAnalyticsCategoryRequest":{ "type":"structure", diff --git a/services/transcribestreaming/pom.xml b/services/transcribestreaming/pom.xml index e84951b04180..60d910094999 100644 --- a/services/transcribestreaming/pom.xml +++ b/services/transcribestreaming/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT transcribestreaming AWS Java SDK :: Services :: AWS Transcribe Streaming diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/customization.config b/services/transcribestreaming/src/main/resources/codegen-resources/customization.config index 9f59866f0821..93624870594f 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/customization.config +++ b/services/transcribestreaming/src/main/resources/codegen-resources/customization.config @@ -7,6 +7,5 @@ "AudioStream": ["AudioEvent"], "MedicalTranscriptResultStream": ["TranscriptEvent"] }, - "usePriorKnowledgeForH2": true, - "enableFastUnmarshaller": true + "usePriorKnowledgeForH2": true } diff --git 
a/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json b/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json index 551077b31ef1..6a57f1ac5512 100644 --- a/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json +++ b/services/transcribestreaming/src/main/resources/codegen-resources/service-2.json @@ -361,7 +361,7 @@ }, "NoteTemplate":{ "shape":"MedicalScribeNoteTemplate", - "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patients progress toward goals. Sections include Goal, Intervention, Response, Progress, and Plan.

        " + "documentation":"

        Specify one of the following templates to use for the clinical note summary. The default is HISTORY_AND_PHYSICAL.

        • HISTORY_AND_PHYSICAL: Provides summaries for key sections of the clinical documentation. Examples of sections include Chief Complaint, History of Present Illness, Review of Systems, Past Medical History, Assessment, and Plan.

        • GIRPP: Provides summaries based on the patient's progress toward goals. Examples of sections include Goal, Intervention, Response, Progress, and Plan.

        • BIRP: Focuses on the patient's behavioral patterns and responses. Examples of sections include Behavior, Intervention, Response, and Plan.

        • SIRP: Emphasizes the situational context of therapy. Examples of sections include Situation, Intervention, Response, and Plan.

        • DAP: Provides a simplified format for clinical documentation. Examples of sections include Data, Assessment, and Plan.

        • BEHAVIORAL_SOAP: Behavioral health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        • PHYSICAL_SOAP: Physical health focused documentation format. Examples of sections include Subjective, Objective, Assessment, and Plan.

        " } }, "documentation":"

        The output configuration for aggregated transcript and clinical note generation.

        " @@ -421,11 +421,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the utterance that was identified as PII.

        " + "documentation":"

        The start time of the utterance that was identified as PII in seconds, with millisecond precision (e.g., 1.056)

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the utterance that was identified as PII.

        " + "documentation":"

        The end time of the utterance that was identified as PII in seconds, with millisecond precision (e.g., 1.056)

        " }, "Category":{ "shape":"String", @@ -507,11 +507,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The start time of the transcribed item in seconds, with millisecond precision (e.g., 1.056)

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The end time of the transcribed item in seconds, with millisecond precision (e.g., 1.056)

        " }, "Type":{ "shape":"ItemType", @@ -706,11 +706,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the utterance that was identified as PHI.

        " + "documentation":"

        The start time, in seconds, of the utterance that was identified as PHI.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the utterance that was identified as PHI.

        " + "documentation":"

        The end time, in seconds, of the utterance that was identified as PHI.

        " }, "Category":{ "shape":"String", @@ -736,11 +736,11 @@ "members":{ "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The start time, in seconds, of the transcribed item.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the transcribed item.

        " + "documentation":"

        The end time, in seconds, of the transcribed item.

        " }, "Type":{ "shape":"ItemType", @@ -774,11 +774,11 @@ }, "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the Result.

        " + "documentation":"

        The start time, in seconds, of the Result.

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the Result.

        " + "documentation":"

        The end time, in seconds, of the Result.

        " }, "IsPartial":{ "shape":"Boolean", @@ -932,7 +932,12 @@ "type":"string", "enum":[ "HISTORY_AND_PHYSICAL", - "GIRPP" + "GIRPP", + "DAP", + "SIRP", + "BIRP", + "BEHAVIORAL_SOAP", + "PHYSICAL_SOAP" ] }, "MedicalScribeParticipantRole":{ @@ -1297,11 +1302,11 @@ }, "StartTime":{ "shape":"Double", - "documentation":"

        The start time, in milliseconds, of the Result.

        " + "documentation":"

        The start time of the Result in seconds, with millisecond precision (e.g., 1.056).

        " }, "EndTime":{ "shape":"Double", - "documentation":"

        The end time, in milliseconds, of the Result.

        " + "documentation":"

        The end time of the Result in seconds, with millisecond precision (e.g., 1.056).

        " }, "IsPartial":{ "shape":"Boolean", diff --git a/services/transfer/pom.xml b/services/transfer/pom.xml index ad47f1114d4a..705ba007622b 100644 --- a/services/transfer/pom.xml +++ b/services/transfer/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT transfer AWS Java SDK :: Services :: Transfer diff --git a/services/transfer/src/main/resources/codegen-resources/customization.config b/services/transfer/src/main/resources/codegen-resources/customization.config index ca33f8c0427b..74e0cba3a8db 100644 --- a/services/transfer/src/main/resources/codegen-resources/customization.config +++ b/services/transfer/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listServers" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/transfer/src/main/resources/codegen-resources/service-2.json b/services/transfer/src/main/resources/codegen-resources/service-2.json index 9472ae7d1373..b414a38a6213 100644 --- a/services/transfer/src/main/resources/codegen-resources/service-2.json +++ b/services/transfer/src/main/resources/codegen-resources/service-2.json @@ -1464,7 +1464,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -1727,6 +1727,10 @@ "S3StorageOptions":{ "shape":"S3StorageOptions", "documentation":"

        Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.

        By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.

        " + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

        Specifies whether to use IPv4 only, or to use dual-stack (IPv4 and IPv6) for your Transfer Family endpoint. The default value is IPV4.

        The IpAddressType parameter has the following limitations:

        • It cannot be changed while the server is online. You must stop the server before modifying this parameter.

        • It cannot be updated to DUALSTACK if the server has AddressAllocationIds specified.

        When using DUALSTACK as the IpAddressType, you cannot set the AddressAllocationIds parameter for the EndpointDetails for the server.

        " } } }, @@ -1750,7 +1754,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -2481,7 +2485,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", @@ -2929,6 +2933,10 @@ "As2ServiceManagedEgressIpAddresses":{ "shape":"ServiceManagedEgressIpAddresses", "documentation":"

        The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs.

        These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.

        " + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

        Specifies whether to use IPv4 only, or to use dual-stack (IPv4 and IPv6) for your Transfer Family endpoint. The default value is IPV4.

        The IpAddressType parameter has the following limitations:

        • It cannot be changed while the server is online. You must stop the server before modifying this parameter.

        • It cannot be updated to DUALSTACK if the server has AddressAllocationIds specified.

        When using DUALSTACK as the IpAddressType, you cannot set the AddressAllocationIds parameter for the EndpointDetails for the server.

        " } }, "documentation":"

        Describes the properties of a file transfer protocol-enabled server that was specified.

        " @@ -2943,7 +2951,7 @@ }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", @@ -3166,7 +3174,7 @@ "members":{ "AddressAllocationIds":{ "shape":"AddressAllocationIds", - "documentation":"

        A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.

        An address allocation ID corresponds to the allocation ID of an Elastic IP address. This value can be retrieved from the allocationId field from the Amazon EC2 Address data type. One way to retrieve this value is by calling the EC2 DescribeAddresses API.

        This parameter is optional. Set this parameter if you want to make your VPC endpoint public-facing. For details, see Create an internet-facing endpoint for your server.

        This property can only be set as follows:

        • EndpointType must be set to VPC

        • The Transfer Family server must be offline.

        • You cannot set this parameter for Transfer Family servers that use the FTP protocol.

        • The server must already have SubnetIds populated (SubnetIds and AddressAllocationIds cannot be updated simultaneously).

        • AddressAllocationIds can't contain duplicates, and must be equal in length to SubnetIds. For example, if you have three subnet IDs, you must also specify three address allocation IDs.

        • Call the UpdateServer API to set or change this parameter.

        " + "documentation":"

        A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.

        An address allocation ID corresponds to the allocation ID of an Elastic IP address. This value can be retrieved from the allocationId field from the Amazon EC2 Address data type. One way to retrieve this value is by calling the EC2 DescribeAddresses API.

        This parameter is optional. Set this parameter if you want to make your VPC endpoint public-facing. For details, see Create an internet-facing endpoint for your server.

        This property can only be set as follows:

        • EndpointType must be set to VPC

        • The Transfer Family server must be offline.

        • You cannot set this parameter for Transfer Family servers that use the FTP protocol.

        • The server must already have SubnetIds populated (SubnetIds and AddressAllocationIds cannot be updated simultaneously).

        • AddressAllocationIds can't contain duplicates, and must be equal in length to SubnetIds. For example, if you have three subnet IDs, you must also specify three address allocation IDs.

        • Call the UpdateServer API to set or change this parameter.

        • You can't set address allocation IDs for servers that have an IpAddressType set to DUALSTACK. You can only set this property if IpAddressType is set to IPV4.

        " }, "SubnetIds":{ "shape":"SubnetIds", @@ -3633,6 +3641,13 @@ "documentation":"

        This exception is thrown when the client submits a malformed request.

        ", "exception":true }, + "IpAddressType":{ + "type":"string", + "enum":[ + "IPV4", + "DUALSTACK" + ] + }, "ListAccessesRequest":{ "type":"structure", "required":["ServerId"], @@ -4093,7 +4108,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -4356,7 +4371,7 @@ }, "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -5044,7 +5059,7 @@ "type":"string", "max":32, "min":0, - "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" + "pattern":"[0-9a-fA-F\\.\\:]+" }, "SshPublicKey":{ "type":"structure", @@ -5496,7 +5511,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", @@ -5807,6 +5822,10 @@ "S3StorageOptions":{ "shape":"S3StorageOptions", "documentation":"

        Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.

        By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.

        " + }, + "IpAddressType":{ + "shape":"IpAddressType", + "documentation":"

        Specifies whether to use IPv4 only, or to use dual-stack (IPv4 and IPv6) for your Transfer Family endpoint. The default value is IPV4.

        The IpAddressType parameter has the following limitations:

        • It cannot be changed while the server is online. You must stop the server before modifying this parameter.

        • It cannot be updated to DUALSTACK if the server has AddressAllocationIds specified.

        When using DUALSTACK as the IpAddressType, you cannot set the AddressAllocationIds parameter for the EndpointDetails for the server.

        " } } }, @@ -5829,7 +5848,7 @@ "members":{ "HomeDirectory":{ "shape":"HomeDirectory", - "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

        " + "documentation":"

        The landing directory (folder) for a user when they log in to the server using the client.

        A HomeDirectory example is /bucket_name/home/mydirectory.

        You can use the HomeDirectory parameter for HomeDirectoryType when it is set to either PATH or LOGICAL.

        " }, "HomeDirectoryType":{ "shape":"HomeDirectoryType", diff --git a/services/translate/pom.xml b/services/translate/pom.xml index b60f252885e3..7ff89f7185a9 100644 --- a/services/translate/pom.xml +++ b/services/translate/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 translate diff --git a/services/translate/src/main/resources/codegen-resources/customization.config b/services/translate/src/main/resources/codegen-resources/customization.config index c0db422a83ce..f927886f4077 100644 --- a/services/translate/src/main/resources/codegen-resources/customization.config +++ b/services/translate/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listTerminologies" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/trustedadvisor/pom.xml b/services/trustedadvisor/pom.xml index a071ece7359e..98c8466deb88 100644 --- a/services/trustedadvisor/pom.xml +++ b/services/trustedadvisor/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT trustedadvisor AWS Java SDK :: Services :: Trusted Advisor diff --git a/services/trustedadvisor/src/main/resources/codegen-resources/customization.config b/services/trustedadvisor/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/trustedadvisor/src/main/resources/codegen-resources/customization.config +++ b/services/trustedadvisor/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/verifiedpermissions/pom.xml b/services/verifiedpermissions/pom.xml index c94dda40bc37..25c9351515b0 100644 --- a/services/verifiedpermissions/pom.xml +++ 
b/services/verifiedpermissions/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT verifiedpermissions AWS Java SDK :: Services :: Verified Permissions diff --git a/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config b/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config +++ b/services/verifiedpermissions/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/voiceid/pom.xml b/services/voiceid/pom.xml index 65c40aae39a0..bda59fc6c92b 100644 --- a/services/voiceid/pom.xml +++ b/services/voiceid/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT voiceid AWS Java SDK :: Services :: Voice ID diff --git a/services/voiceid/src/main/resources/codegen-resources/customization.config b/services/voiceid/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/voiceid/src/main/resources/codegen-resources/customization.config +++ b/services/voiceid/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/vpclattice/pom.xml b/services/vpclattice/pom.xml index 80b3f6b6865a..02be9b1e1a83 100644 --- a/services/vpclattice/pom.xml +++ b/services/vpclattice/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT vpclattice AWS Java SDK :: Services :: VPC Lattice diff --git a/services/vpclattice/src/main/resources/codegen-resources/customization.config 
b/services/vpclattice/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/vpclattice/src/main/resources/codegen-resources/customization.config +++ b/services/vpclattice/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/waf/pom.xml b/services/waf/pom.xml index 6efd6fd694bc..14080f133c74 100644 --- a/services/waf/pom.xml +++ b/services/waf/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT waf AWS Java SDK :: Services :: AWS WAF diff --git a/services/waf/src/main/resources/codegen-resources/waf/customization.config b/services/waf/src/main/resources/codegen-resources/waf/customization.config index e19a7e6f3f44..23503733d437 100644 --- a/services/waf/src/main/resources/codegen-resources/waf/customization.config +++ b/services/waf/src/main/resources/codegen-resources/waf/customization.config @@ -18,6 +18,5 @@ "listSubscribedRuleGroups", "listWebACLs", "listXssMatchSets" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config index c58308f678b2..e8d7cb991668 100644 --- a/services/waf/src/main/resources/codegen-resources/wafregional/customization.config +++ b/services/waf/src/main/resources/codegen-resources/wafregional/customization.config @@ -25,6 +25,5 @@ "listSubscribedRuleGroups", "listWebACLs", "listXssMatchSets" - ], - "enableFastUnmarshaller": true + ] } diff --git a/services/wafv2/pom.xml b/services/wafv2/pom.xml index 4c203fa23251..1b2eb29e49a4 100644 --- a/services/wafv2/pom.xml +++ b/services/wafv2/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT wafv2 AWS Java SDK :: 
Services :: WAFV2 diff --git a/services/wafv2/src/main/resources/codegen-resources/customization.config b/services/wafv2/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/wafv2/src/main/resources/codegen-resources/customization.config +++ b/services/wafv2/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/wafv2/src/main/resources/codegen-resources/paginators-1.json b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json index 5677bd8e4a2d..ea142457a6a7 100644 --- a/services/wafv2/src/main/resources/codegen-resources/paginators-1.json +++ b/services/wafv2/src/main/resources/codegen-resources/paginators-1.json @@ -1,4 +1,3 @@ { - "pagination": { - } + "pagination": {} } diff --git a/services/wafv2/src/main/resources/codegen-resources/service-2.json b/services/wafv2/src/main/resources/codegen-resources/service-2.json index 813353a9b906..cfe9f24360a4 100644 --- a/services/wafv2/src/main/resources/codegen-resources/service-2.json +++ b/services/wafv2/src/main/resources/codegen-resources/service-2.json @@ -997,6 +997,11 @@ "type":"integer", "min":0 }, + "ASN":{ + "type":"long", + "max":4294967295, + "min":0 + }, "AWSManagedRulesACFPRuleSet":{ "type":"structure", "required":[ @@ -1026,7 +1031,7 @@ "documentation":"

        Allow the use of regular expressions in the registration page path and the account creation path.

        " } }, - "documentation":"

        Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the account creation fraud prevention managed rule group, AWSManagedRulesACFPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " }, "AWSManagedRulesATPRuleSet":{ "type":"structure", @@ -1049,7 +1054,22 @@ "documentation":"

        Allow the use of regular expressions in the login page path.

        " } }, - "documentation":"

        Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the account takeover prevention managed rule group, AWSManagedRulesATPRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " + }, + "AWSManagedRulesAntiDDoSRuleSet":{ + "type":"structure", + "required":["ClientSideActionConfig"], + "members":{ + "ClientSideActionConfig":{ + "shape":"ClientSideActionConfig", + "documentation":"

        Configures the request handling that's applied by the managed rule group rules ChallengeAllDuringEvent and ChallengeDDoSRequests during a distributed denial of service (DDoS) attack.

        " + }, + "SensitivityToBlock":{ + "shape":"SensitivityToAct", + "documentation":"

        The sensitivity that the rule group rule DDoSRequests uses when matching against the DDoS suspicion labeling on a request. The managed rule group adds the labeling during DDoS events, before the DDoSRequests rule runs.

        The higher the sensitivity, the more levels of labeling that the rule matches:

        • Low sensitivity is less sensitive, causing the rule to match only on the most likely participants in an attack, which are the requests with the high suspicion label awswaf:managed:aws:anti-ddos:high-suspicion-ddos-request.

        • Medium sensitivity causes the rule to match on the medium and high suspicion labels.

        • High sensitivity causes the rule to match on all of the suspicion labels: low, medium, and high.

        Default: LOW

        " + } + }, + "documentation":"

        Configures the use of the anti-DDoS managed rule group, AWSManagedRulesAntiDDoSRuleSet. This configuration is used in ManagedRuleGroupConfig.

        The configuration that you provide here determines whether and how the rules in the rule group are used.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " }, "AWSManagedRulesBotControlRuleSet":{ "type":"structure", @@ -1064,7 +1084,7 @@ "documentation":"

        Applies only to the targeted inspection level.

        Determines whether to use machine learning (ML) to analyze your web traffic for bot-related activity. Machine learning is required for the Bot Control rules TGT_ML_CoordinatedActivityLow and TGT_ML_CoordinatedActivityMedium, which inspect for anomalous behavior that might indicate distributed, coordinated bot activity.

        For more information about this choice, see the listing for these rules in the table at Bot Control rules listing in the WAF Developer Guide.

        Default: TRUE

        " } }, - "documentation":"

        Details for your use of the Bot Control managed rule group, AWSManagedRulesBotControlRuleSet. This configuration is used in ManagedRuleGroupConfig.

        " + "documentation":"

        Details for your use of the Bot Control managed rule group, AWSManagedRulesBotControlRuleSet. This configuration is used in ManagedRuleGroupConfig.

        For additional information about this and the other intelligent threat mitigation rule groups, see Intelligent threat mitigation in WAF and Amazon Web Services Managed Rules rule groups list in the WAF Developer Guide.

        " }, "Action":{"type":"string"}, "ActionCondition":{ @@ -1106,14 +1126,12 @@ }, "All":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect all of the elements that WAF has parsed and extracted from the web request component that you've identified in your FieldToMatch specifications.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"All\": {}

        " }, "AllQueryArguments":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect all query arguments of the web request.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"AllQueryArguments\": {}

        " }, "AllowAction":{ @@ -1137,6 +1155,57 @@ }, "documentation":"

        A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement.

        " }, + "ApplicationAttribute":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"AttributeName", + "documentation":"

        Specifies the attribute name.

        " + }, + "Values":{ + "shape":"AttributeValues", + "documentation":"

        Specifies the attribute value.

        " + } + }, + "documentation":"

        Application details defined during the web ACL creation process. Application attributes help WAF give recommendations for protection packs.

        " + }, + "ApplicationAttributes":{ + "type":"list", + "member":{"shape":"ApplicationAttribute"}, + "max":10, + "min":1 + }, + "ApplicationConfig":{ + "type":"structure", + "members":{ + "Attributes":{ + "shape":"ApplicationAttributes", + "documentation":"

        Contains the attribute name and a list of values for that attribute.

        " + } + }, + "documentation":"

        A list of ApplicationAttributes that contains information about the application.

        " + }, + "AsnList":{ + "type":"list", + "member":{"shape":"ASN"}, + "max":100, + "min":1 + }, + "AsnMatchStatement":{ + "type":"structure", + "required":["AsnList"], + "members":{ + "AsnList":{ + "shape":"AsnList", + "documentation":"

        Contains one or more Autonomous System Numbers (ASNs). ASNs are unique identifiers assigned to large internet networks managed by organizations such as internet service providers, enterprises, universities, or government agencies.

        " + }, + "ForwardedIPConfig":{ + "shape":"ForwardedIPConfig", + "documentation":"

        The configuration for inspecting IP addresses to match against an ASN in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        " + } + }, + "documentation":"

        A rule statement that inspects web traffic based on the Autonomous System Number (ASN) associated with the request's IP address.

        For additional details, see ASN match rule statement in the WAF Developer Guide.

        " + }, "AssociateWebACLRequest":{ "type":"structure", "required":[ @@ -1156,8 +1225,7 @@ }, "AssociateWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AssociatedResourceType":{ "type":"string", @@ -1179,6 +1247,23 @@ }, "documentation":"

        Specifies custom configurations for the associations between the web ACL and protected resources.

        Use this to customize the maximum size of the request body that your protected resources forward to WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).

        You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

        For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).

        " }, + "AttributeName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[\\w\\-]+$" + }, + "AttributeValue":{ + "type":"string", + "max":64, + "min":1 + }, + "AttributeValues":{ + "type":"list", + "member":{"shape":"AttributeValue"}, + "max":10, + "min":1 + }, "BlockAction":{ "type":"structure", "members":{ @@ -1342,6 +1427,36 @@ } } }, + "ClientSideAction":{ + "type":"structure", + "required":["UsageOfAction"], + "members":{ + "UsageOfAction":{ + "shape":"UsageOfAction", + "documentation":"

        Determines whether to use the AWSManagedRulesAntiDDoSRuleSet rules ChallengeAllDuringEvent and ChallengeDDoSRequests in the rule group evaluation and the related label awswaf:managed:aws:anti-ddos:challengeable-request.

        • If usage is enabled:

          • The managed rule group adds the label awswaf:managed:aws:anti-ddos:challengeable-request to any web request whose URL does NOT match the regular expressions provided in the ClientSideAction setting ExemptUriRegularExpressions.

          • The two rules are evaluated against web requests for protected resources that are experiencing a DDoS attack. The two rules only apply their action to matching requests that have the label awswaf:managed:aws:anti-ddos:challengeable-request.

        • If usage is disabled:

          • The managed rule group doesn't add the label awswaf:managed:aws:anti-ddos:challengeable-request to any web requests.

          • The two rules are not evaluated.

          • None of the other ClientSideAction settings have any effect.

        This setting only enables or disables the use of the two anti-DDOS rules ChallengeAllDuringEvent and ChallengeDDoSRequests in the anti-DDoS managed rule group.

        This setting doesn't alter the action setting in the two rules. To override the actions used by the rules ChallengeAllDuringEvent and ChallengeDDoSRequests, enable this setting, and then override the rule actions in the usual way, in your managed rule group configuration.

        " + }, + "Sensitivity":{ + "shape":"SensitivityToAct", + "documentation":"

        The sensitivity that the rule group rule ChallengeDDoSRequests uses when matching against the DDoS suspicion labeling on a request. The managed rule group adds the labeling during DDoS events, before the ChallengeDDoSRequests rule runs.

        The higher the sensitivity, the more levels of labeling that the rule matches:

        • Low sensitivity is less sensitive, causing the rule to match only on the most likely participants in an attack, which are the requests with the high suspicion label awswaf:managed:aws:anti-ddos:high-suspicion-ddos-request.

        • Medium sensitivity causes the rule to match on the medium and high suspicion labels.

        • High sensitivity causes the rule to match on all of the suspicion labels: low, medium, and high.

        Default: HIGH

        " + }, + "ExemptUriRegularExpressions":{ + "shape":"RegularExpressionList", + "documentation":"

        The regular expression to match against the web request URI, used to identify requests that can't handle a silent browser challenge. When the ClientSideAction setting UsageOfAction is enabled, the managed rule group uses this setting to determine which requests to label with awswaf:managed:aws:anti-ddos:challengeable-request. If UsageOfAction is disabled, this setting has no effect and the managed rule group doesn't add the label to any requests.

        The anti-DDoS managed rule group doesn't evaluate the rules ChallengeDDoSRequests or ChallengeAllDuringEvent for web requests whose URIs match this regex. This is true regardless of whether you override the rule action for either of the rules in your web ACL configuration.

        Amazon Web Services recommends using a regular expression.

        This setting is required if UsageOfAction is set to ENABLED. If required, you can provide between 1 and 5 regex objects in the array of settings.

        Amazon Web Services recommends starting with the following setting. Review and update it for your application's needs:

        \\/api\\/|\\.(acc|avi|css|gif|jpe?g|js|mp[34]|ogg|otf|pdf|png|tiff?|ttf|webm|webp|woff2?)$

        " + } + }, + "documentation":"

        This is part of the AWSManagedRulesAntiDDoSRuleSet ClientSideActionConfig configuration in ManagedRuleGroupConfig.

        " + }, + "ClientSideActionConfig":{ + "type":"structure", + "required":["Challenge"], + "members":{ + "Challenge":{ + "shape":"ClientSideAction", + "documentation":"

        Configuration for the use of the AWSManagedRulesAntiDDoSRuleSet rules ChallengeAllDuringEvent and ChallengeDDoSRequests.

        This setting isn't related to the configuration of the Challenge action itself. It only configures the use of the two anti-DDoS rules named here.

        You can enable or disable the use of these rules, and you can configure how to use them when they are enabled.

        " + } + }, + "documentation":"

        This is part of the configuration for the managed rules AWSManagedRulesAntiDDoSRuleSet in ManagedRuleGroupConfig.

        " + }, "ComparisonOperator":{ "type":"string", "enum":[ @@ -1915,6 +2030,14 @@ "AssociationConfig":{ "shape":"AssociationConfig", "documentation":"

        Specifies custom configurations for the associations between the web ACL and protected resources.

        Use this to customize the maximum size of the request body that your protected resources forward to WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).

        You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

        For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Specifies the type of DDoS protection to apply to web request data for a web ACL. For most scenarios, it is recommended to use the default protection level, ACTIVE_UNDER_DDOS. If a web ACL is associated with multiple Application Load Balancers, the changes you make to DDoS protection in that web ACL will apply to all associated Application Load Balancers.

        " + }, + "ApplicationConfig":{ + "shape":"ApplicationConfig", + "documentation":"

        Configures the ability for the WAF console to store and retrieve application attributes during the web ACL creation process. Application attributes help WAF give recommendations for protection packs.

        " } } }, @@ -2105,8 +2228,7 @@ }, "DeleteAPIKeyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteFirewallManagerRuleGroupsRequest":{ "type":"structure", @@ -2163,8 +2285,7 @@ }, "DeleteIPSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLoggingConfigurationRequest":{ "type":"structure", @@ -2186,8 +2307,7 @@ }, "DeleteLoggingConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePermissionPolicyRequest":{ "type":"structure", @@ -2201,8 +2321,7 @@ }, "DeletePermissionPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRegexPatternSetRequest":{ "type":"structure", @@ -2233,8 +2352,7 @@ }, "DeleteRegexPatternSetResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRuleGroupRequest":{ "type":"structure", @@ -2265,8 +2383,7 @@ }, "DeleteRuleGroupResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWebACLRequest":{ "type":"structure", @@ -2297,8 +2414,7 @@ }, "DeleteWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAllManagedProductsRequest":{ "type":"structure", @@ -2416,8 +2532,7 @@ }, "DisassociateWebACLResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DownloadUrl":{"type":"string"}, "EmailField":{ @@ -2715,7 +2830,7 @@ "documentation":"

        The match status to assign to the web request if the request doesn't have a valid IP address in the specified position.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        You can specify the following fallback behaviors:

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, - "documentation":"

        The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        This configuration is used for GeoMatchStatement and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

        WAF only evaluates the first IP address found in the specified HTTP header.

        " + "documentation":"

        The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify any header name.

        If the specified header isn't present in the request, WAF doesn't apply the rule to the web request at all.

        This configuration is used for GeoMatchStatement, AsnMatchStatement, and RateBasedStatement. For IPSetReferenceStatement, use IPSetForwardedIPConfig instead.

        WAF only evaluates the first IP address found in the specified HTTP header.

        " }, "ForwardedIPHeaderName":{ "type":"string", @@ -3243,7 +3358,7 @@ "members":{ "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

        What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " + "documentation":"

        What WAF should do if the headers determined by your match scope are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, "documentation":"

        Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection. WAF generates the string and then uses that as the field to match component in its inspection. WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer.

        " @@ -3267,7 +3382,7 @@ }, "OversizeHandling":{ "shape":"OversizeHandling", - "documentation":"

        What WAF should do if the headers of the request are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " + "documentation":"

        What WAF should do if the headers determined by your match scope are more numerous or larger than WAF can inspect. WAF does not support inspecting the entire contents of request headers when they exceed 8 KB (8192 bytes) or 200 total headers. The underlying host service forwards a maximum of 200 headers and at most 8 KB of header contents to WAF.

        The options for oversize handling are the following:

        • CONTINUE - Inspect the available headers normally, according to the rule inspection criteria.

        • MATCH - Treat the web request as matching the rule statement. WAF applies the rule action to the request.

        • NO_MATCH - Treat the web request as not matching the rule statement.

        " } }, "documentation":"

        Inspect all headers in the web request. You can specify the parts of the headers to inspect and you can narrow the set of headers to inspect by including or excluding specific keys.

        This is used to indicate the web request component to inspect, in the FieldToMatch specification.

        If you want to inspect just the value of a single header, use the SingleHeader FieldToMatch setting instead.

        Example JSON: \"Headers\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"KEY\", \"OversizeHandling\": \"MATCH\" }

        " @@ -4059,6 +4174,13 @@ "min":1, "pattern":".*\\S.*" }, + "LowReputationMode":{ + "type":"string", + "enum":[ + "ACTIVE_UNDER_DDOS", + "ALWAYS_ON" + ] + }, "ManagedProductDescriptor":{ "type":"structure", "members":{ @@ -4143,9 +4265,13 @@ "AWSManagedRulesACFPRuleSet":{ "shape":"AWSManagedRulesACFPRuleSet", "documentation":"

        Additional configuration for using the account creation fraud prevention (ACFP) managed rule group, AWSManagedRulesACFPRuleSet. Use this to provide account creation request information to the rule group. For web ACLs that protect CloudFront distributions, use this to also provide the information about how your distribution responds to account creation requests.

        For information about using the ACFP managed rule group, see WAF Fraud Control account creation fraud prevention (ACFP) rule group and WAF Fraud Control account creation fraud prevention (ACFP) in the WAF Developer Guide.

        " + }, + "AWSManagedRulesAntiDDoSRuleSet":{ + "shape":"AWSManagedRulesAntiDDoSRuleSet", + "documentation":"

        Additional configuration for using the anti-DDoS managed rule group, AWSManagedRulesAntiDDoSRuleSet. Use this to configure anti-DDoS behavior for the rule group.

        For information about using the anti-DDoS managed rule group, see WAF Anti-DDoS rule group and Distributed Denial of Service (DDoS) prevention in the WAF Developer Guide.

        " } }, - "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        For example specifications, see the examples section of CreateWebACL.

        " + "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesAntiDDoSRuleSet configuration object to configure the anti-DDoS managed rule group. The configuration includes the sensitivity levels to use in the rules that typically block and challenge requests that might be participating in DDoS attacks and the specification to use to indicate whether a request can handle a silent browser challenge.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        For example specifications, see the examples section of CreateWebACL.

        " }, "ManagedRuleGroupConfigs":{ "type":"list", @@ -4180,11 +4306,11 @@ }, "ManagedRuleGroupConfigs":{ "shape":"ManagedRuleGroupConfigs", - "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        " + "documentation":"

        Additional information that's used by a managed rule group. Many managed rule groups don't require this.

        The rule groups used for intelligent threat mitigation require additional configuration:

        • Use the AWSManagedRulesACFPRuleSet configuration object to configure the account creation fraud prevention managed rule group. The configuration includes the registration and sign-up pages of your application and the locations in the account creation request payload of data, such as the user email and phone number fields.

        • Use the AWSManagedRulesAntiDDoSRuleSet configuration object to configure the anti-DDoS managed rule group. The configuration includes the sensitivity levels to use in the rules that typically block and challenge requests that might be participating in DDoS attacks and the specification to use to indicate whether a request can handle a silent browser challenge.

        • Use the AWSManagedRulesATPRuleSet configuration object to configure the account takeover prevention managed rule group. The configuration includes the sign-in page of your application and the locations in the login request payload of data such as the username and password.

        • Use the AWSManagedRulesBotControlRuleSet configuration object to configure the protection level that you want the Bot Control rule group to use.

        " }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", - "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, WAF doesn't return an error and doesn't apply the override setting.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " + "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Verify the rule names in your overrides carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " } }, "documentation":"

        A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.

        You cannot nest a ManagedRuleGroupStatement, for example for use inside a NotStatement or OrStatement. You cannot use a managed rule group inside another rule group. You can only reference a managed rule group as a top-level statement within a rule that you define in a web ACL.

        You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. For more information, see WAF Pricing.

        " @@ -4346,8 +4472,7 @@ }, "Method":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the HTTP method of the web request. The method indicates the type of operation that the request is asking the origin to perform.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"Method\": {}

        " }, "MetricName":{ @@ -4386,8 +4511,7 @@ }, "NoneAction":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies that WAF should do nothing. This is used for the OverrideAction setting on a Rule when the rule uses a rule group reference statement.

        This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

        JSON specification: \"None\": {}

        " }, "NotStatement":{ @@ -4401,6 +4525,17 @@ }, "documentation":"

        A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement.

        " }, + "OnSourceDDoSProtectionConfig":{ + "type":"structure", + "required":["ALBLowReputationMode"], + "members":{ + "ALBLowReputationMode":{ + "shape":"LowReputationMode", + "documentation":"

        The level of DDoS protection that applies to web ACLs associated with Application Load Balancers. ACTIVE_UNDER_DDOS protection is enabled by default whenever a web ACL is associated with an Application Load Balancer. In the event that an Application Load Balancer experiences high-load conditions or suspected DDoS attacks, the ACTIVE_UNDER_DDOS protection automatically rate limits traffic from known low reputation sources without disrupting Application Load Balancer availability. ALWAYS_ON protection provides constant, always-on monitoring of known low reputation sources for suspected DDoS attacks. While this provides a higher level of protection, there may be potential impacts on legitimate traffic.

        " + } + }, + "documentation":"

        Configures the level of DDoS protection that applies to web ACLs associated with Application Load Balancers.

        " + }, "OrStatement":{ "type":"structure", "required":["Statements"], @@ -4513,7 +4648,8 @@ "SCOPE_DOWN", "CUSTOM_KEYS", "ACP_RULE_SET_RESPONSE_INSPECTION", - "DATA_PROTECTION_CONFIG" + "DATA_PROTECTION_CONFIG", + "LOW_REPUTATION_MODE" ] }, "ParameterExceptionParameter":{ @@ -4686,13 +4822,11 @@ }, "PutPermissionPolicyResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "QueryString":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the query string of the web request. This is the part of a URL that appears after a ? character, if any.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"QueryString\": {}

        " }, "RateBasedStatement":{ @@ -4784,6 +4918,10 @@ "JA4Fingerprint":{ "shape":"RateLimitJA4Fingerprint", "documentation":"

        Use the request's JA4 fingerprint as an aggregate key. If you use a single JA4 fingerprint as your custom key, then each value fully defines an aggregation instance.

        " + }, + "ASN":{ + "shape":"RateLimitAsn", + "documentation":"

        Use an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key. Each distinct ASN contributes to the aggregation instance.

        " } }, "documentation":"

        Specifies a single custom aggregate key for a rate-based rule.

        Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling.

        " @@ -4812,6 +4950,11 @@ "max":2000000000, "min":10 }, + "RateLimitAsn":{ + "type":"structure", + "members":{}, + "documentation":"

        Specifies an Autonomous System Number (ASN) derived from the request's originating or forwarded IP address as an aggregate key for a rate-based rule. Each distinct ASN contributes to the aggregation instance. If you use a single ASN as your custom key, then each ASN fully defines an aggregation instance.

        " + }, "RateLimitCookie":{ "type":"structure", "required":[ @@ -4832,14 +4975,12 @@ }, "RateLimitForwardedIP":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the first IP address in an HTTP header as an aggregate key for a rate-based rule. Each distinct forwarded IP address contributes to the aggregation instance.

        This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. When you specify an IP or forwarded IP in the custom key settings, you must also specify at least one other key to use. You can aggregate on only the forwarded IP address by specifying FORWARDED_IP in your rate-based statement's AggregateKeyType.

        This data type supports using the forwarded IP address in the web request aggregation for a rate-based rule, in RateBasedStatementCustomKey. The JSON specification for using the forwarded IP address doesn't explicitly use this data type.

        JSON specification: \"ForwardedIP\": {}

        When you use this specification, you must also configure the forwarded IP address in the rate-based statement's ForwardedIPConfig.

        " }, "RateLimitHTTPMethod":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the request's HTTP method as an aggregate key for a rate-based rule. Each distinct HTTP method contributes to the aggregation instance. If you use just the HTTP method as your custom key, then each method fully defines an aggregation instance.

        JSON specification: \"RateLimitHTTPMethod\": {}

        " }, "RateLimitHeader":{ @@ -4862,8 +5003,7 @@ }, "RateLimitIP":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Specifies the IP address in the web request as an aggregate key for a rate-based rule. Each distinct IP address contributes to the aggregation instance.

        This setting is used only in the RateBasedStatementCustomKey specification of a rate-based rule statement. To use this in the custom key settings, you must specify at least one other key to use, along with the IP address. To aggregate on only the IP address, in your rate-based statement's AggregateKeyType, specify IP.

        JSON specification: \"RateLimitIP\": {}

        " }, "RateLimitJA3Fingerprint":{ @@ -4952,7 +5092,7 @@ "documentation":"

        The string representing the regular expression.

        " } }, - "documentation":"

        A single regular expression. This is used in a RegexPatternSet.

        " + "documentation":"

        A single regular expression. This is used in a RegexPatternSet and also in the configuration for the Amazon Web Services Managed Rules rule group AWSManagedRulesAntiDDoSRuleSet.

        " }, "RegexMatchStatement":{ "type":"structure", @@ -5529,7 +5669,7 @@ }, "RuleActionOverrides":{ "shape":"RuleActionOverrides", - "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Take care to verify the rule names in your overrides. If you provide a rule name that doesn't match the name of any rule in the rule group, WAF doesn't return an error and doesn't apply the override setting.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " + "documentation":"

        Action settings to use in the place of the rule actions that are configured inside the rule group. You specify one override for each rule whose action you want to change.

        Verify the rule names in your overrides carefully. With managed rule groups, WAF silently ignores any override that uses an invalid rule name. With customer-owned rule groups, invalid rule names in your overrides will cause web ACL updates to fail. An invalid rule name is any name that doesn't exactly match the case-sensitive name of an existing rule in the rule group.

        You can use overrides for testing, for example you can override all of rule actions to Count and then monitor the resulting count metrics to understand how the rule group would handle your web traffic. You can also permanently override some or all actions, to modify how the rule group manages your web traffic.

        " } }, "documentation":"

        A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

        You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. You cannot use a rule group reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.

        " @@ -5667,6 +5807,14 @@ "HIGH" ] }, + "SensitivityToAct":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "SingleCookieName":{ "type":"string", "max":60, @@ -5823,6 +5971,10 @@ "RegexMatchStatement":{ "shape":"RegexMatchStatement", "documentation":"

        A rule statement used to search web request components for a match against a single regular expression.

        " + }, + "AsnMatchStatement":{ + "shape":"AsnMatchStatement", + "documentation":"

        A rule statement that inspects web traffic based on the Autonomous System Number (ASN) associated with the request's IP address.

        For additional details, see ASN match rule statement in the WAF Developer Guide.

        " } }, "documentation":"

        The processing guidance for a Rule, used by WAF to determine whether a web request matches the rule.

        For example specifications, see the examples section of CreateWebACL.

        " @@ -5910,8 +6062,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -6030,8 +6181,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateIPSetRequest":{ "type":"structure", @@ -6296,6 +6446,10 @@ "AssociationConfig":{ "shape":"AssociationConfig", "documentation":"

        Specifies custom configurations for the associations between the web ACL and protected resources.

        Use this to customize the maximum size of the request body that your protected resources forward to WAF for inspection. You can customize this setting for CloudFront, API Gateway, Amazon Cognito, App Runner, or Verified Access resources. The default setting is 16 KB (16,384 bytes).

        You are charged additional fees when your protected resources forward body sizes that are larger than the default. For more information, see WAF Pricing.

        For Application Load Balancer and AppSync, the limit is fixed at 8 KB (8,192 bytes).

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Specifies the type of DDoS protection to apply to web request data for a web ACL. For most scenarios, it is recommended to use the default protection level, ACTIVE_UNDER_DDOS. If a web ACL is associated with multiple Application Load Balancers, the changes you make to DDoS protection in that web ACL will apply to all associated Application Load Balancers.

        " } } }, @@ -6320,10 +6474,16 @@ }, "UriPath":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        Inspect the path component of the URI of the web request. This is the part of the web request that identifies a resource. For example, /images/daily-ad.jpg.

        This is used in the FieldToMatch specification for some web request component types.

        JSON specification: \"UriPath\": {}

        " }, + "UsageOfAction":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "UsernameField":{ "type":"structure", "required":["Identifier"], @@ -6644,6 +6804,14 @@ "RetrofittedByFirewallManager":{ "shape":"Boolean", "documentation":"

        Indicates whether this web ACL was created by a customer account and then retrofitted by Firewall Manager. If true, then the web ACL is currently being managed by a Firewall Manager WAF policy, and only Firewall Manager can manage any Firewall Manager rule groups in the web ACL. See also the properties ManagedByFirewallManager, PreProcessFirewallManagerRuleGroups, and PostProcessFirewallManagerRuleGroups.

        " + }, + "OnSourceDDoSProtectionConfig":{ + "shape":"OnSourceDDoSProtectionConfig", + "documentation":"

        Configures the level of DDoS protection that applies to web ACLs associated with Application Load Balancers.

        " + }, + "ApplicationConfig":{ + "shape":"ApplicationConfig", + "documentation":"

        Returns a list of ApplicationAttributes.

        " } }, "documentation":"

        A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resource types include Amazon CloudFront distribution, Amazon API Gateway REST API, Application Load Balancer, AppSync GraphQL API, Amazon Cognito user pool, App Runner service, Amplify application, and Amazon Web Services Verified Access instance.

        " diff --git a/services/wellarchitected/pom.xml b/services/wellarchitected/pom.xml index 7bb854ea2410..75a38eb8aa26 100644 --- a/services/wellarchitected/pom.xml +++ b/services/wellarchitected/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT wellarchitected AWS Java SDK :: Services :: Well Architected diff --git a/services/wellarchitected/src/main/resources/codegen-resources/customization.config b/services/wellarchitected/src/main/resources/codegen-resources/customization.config index beae47f452ee..d7ed49e2a984 100644 --- a/services/wellarchitected/src/main/resources/codegen-resources/customization.config +++ b/services/wellarchitected/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "customServiceMetadata": { "contentType": "application/json" }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/wisdom/pom.xml b/services/wisdom/pom.xml index f2791dd4c043..31dd90181e43 100644 --- a/services/wisdom/pom.xml +++ b/services/wisdom/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT wisdom AWS Java SDK :: Services :: Wisdom diff --git a/services/wisdom/src/main/resources/codegen-resources/customization.config b/services/wisdom/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/wisdom/src/main/resources/codegen-resources/customization.config +++ b/services/wisdom/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workdocs/pom.xml b/services/workdocs/pom.xml index ee7773c5798a..657a957d99a7 100644 --- a/services/workdocs/pom.xml +++ b/services/workdocs/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 
2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT workdocs AWS Java SDK :: Services :: Amazon WorkDocs diff --git a/services/workdocs/src/main/resources/codegen-resources/customization.config b/services/workdocs/src/main/resources/codegen-resources/customization.config index e07d6b8d9c45..c89f8010e088 100644 --- a/services/workdocs/src/main/resources/codegen-resources/customization.config +++ b/services/workdocs/src/main/resources/codegen-resources/customization.config @@ -4,6 +4,5 @@ "describeActivities", "getResources" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workmail/pom.xml b/services/workmail/pom.xml index 906630d6e073..ecf4cf193471 100644 --- a/services/workmail/pom.xml +++ b/services/workmail/pom.xml @@ -20,7 +20,7 @@ services software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 workmail diff --git a/services/workmail/src/main/resources/codegen-resources/customization.config b/services/workmail/src/main/resources/codegen-resources/customization.config index a27b300b86c5..cade6d8f3851 100644 --- a/services/workmail/src/main/resources/codegen-resources/customization.config +++ b/services/workmail/src/main/resources/codegen-resources/customization.config @@ -2,6 +2,5 @@ "verifiedSimpleMethods": [ "listOrganizations" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workmailmessageflow/pom.xml b/services/workmailmessageflow/pom.xml index 73c5c49dda4b..da3387c000de 100644 --- a/services/workmailmessageflow/pom.xml +++ b/services/workmailmessageflow/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT workmailmessageflow AWS Java SDK :: Services :: WorkMailMessageFlow diff --git a/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config 
b/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config +++ b/services/workmailmessageflow/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workspaces/pom.xml b/services/workspaces/pom.xml index 67f2573b7857..63b86ee57f38 100644 --- a/services/workspaces/pom.xml +++ b/services/workspaces/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT workspaces AWS Java SDK :: Services :: Amazon WorkSpaces diff --git a/services/workspaces/src/main/resources/codegen-resources/customization.config b/services/workspaces/src/main/resources/codegen-resources/customization.config index 60b33521804c..fd0fde6fe6ff 100644 --- a/services/workspaces/src/main/resources/codegen-resources/customization.config +++ b/services/workspaces/src/main/resources/codegen-resources/customization.config @@ -11,6 +11,5 @@ "describeAccountModifications", "describeAccount" ], - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workspaces/src/main/resources/codegen-resources/service-2.json b/services/workspaces/src/main/resources/codegen-resources/service-2.json index 7c15404cf884..cea6818c3a30 100644 --- a/services/workspaces/src/main/resources/codegen-resources/service-2.json +++ b/services/workspaces/src/main/resources/codegen-resources/service-2.json @@ -1079,7 +1079,10 @@ "output":{"shape":"ModifyWorkspaceAccessPropertiesResult"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + 
{"shape":"InvalidParameterCombinationException"}, + {"shape":"OperationNotSupportedException"} ], "documentation":"

        Specifies which devices and operating systems users can use to access their WorkSpaces. For more information, see Control Device Access.

        " }, @@ -1501,6 +1504,43 @@ "documentation":"

        The user is not authorized to access a resource.

        ", "exception":true }, + "AccessEndpoint":{ + "type":"structure", + "members":{ + "AccessEndpointType":{ + "shape":"AccessEndpointType", + "documentation":"

        Indicates the type of access endpoint.

        " + }, + "VpcEndpointId":{ + "shape":"AlphanumericDashUnderscoreNonEmptyString", + "documentation":"

        Indicates the VPC endpoint to use for access.

        " + } + }, + "documentation":"

        Describes the access type and endpoint for a WorkSpace.

        " + }, + "AccessEndpointConfig":{ + "type":"structure", + "required":["AccessEndpoints"], + "members":{ + "AccessEndpoints":{ + "shape":"AccessEndpointList", + "documentation":"

        Indicates a list of access endpoints associated with this directory.

        " + }, + "InternetFallbackProtocols":{ + "shape":"InternetFallbackProtocolList", + "documentation":"

        Indicates a list of protocols that fallback to using the public Internet when streaming over a VPC endpoint is not available.

        " + } + }, + "documentation":"

        Describes the access endpoint configuration for a WorkSpace.

        " + }, + "AccessEndpointList":{ + "type":"list", + "member":{"shape":"AccessEndpoint"} + }, + "AccessEndpointType":{ + "type":"string", + "enum":["STREAMING_WSP"] + }, "AccessPropertyValue":{ "type":"string", "enum":[ @@ -1617,6 +1657,10 @@ "pattern":"^(http|https)\\://\\S+" }, "Alias":{"type":"string"}, + "AlphanumericDashUnderscoreNonEmptyString":{ + "type":"string", + "pattern":"^[a-zA-Z0-9\\_\\-]{1,1000}$" + }, "AmazonUuid":{ "type":"string", "max":36, @@ -1650,8 +1694,7 @@ }, "ApplicationNotSupportedException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified application is not supported.

        ", "exception":true }, @@ -1779,8 +1822,7 @@ }, "AssociateIpGroupsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "AssociateWorkspaceApplicationRequest":{ "type":"structure", @@ -1879,8 +1921,7 @@ }, "AuthorizeIpRulesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "AvailableUserSessions":{ "type":"integer", @@ -2124,8 +2165,7 @@ }, "ComputeNotCompatibleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The compute type of the WorkSpace is not compatible with the application.

        ", "exception":true }, @@ -2494,8 +2534,7 @@ }, "CreateTagsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateUpdatedWorkspaceImageRequest":{ "type":"structure", @@ -2673,6 +2712,10 @@ "TimeoutSettings":{ "shape":"TimeoutSettings", "documentation":"

        Indicates the timeout settings of the pool.

        " + }, + "RunningMode":{ + "shape":"PoolsRunningMode", + "documentation":"

        The running mode for the pool.

        " } } }, @@ -2824,10 +2867,6 @@ "DefaultWorkspaceCreationProperties":{ "type":"structure", "members":{ - "EnableWorkDocs":{ - "shape":"BooleanObject", - "documentation":"

        Specifies whether the directory is enabled for Amazon WorkDocs.

        " - }, "EnableInternetAccess":{ "shape":"BooleanObject", "documentation":"

        Specifies whether to automatically assign an Elastic public IP address to WorkSpaces in this directory by default. If enabled, the Elastic public IP address allows outbound internet access from your WorkSpaces when you’re using an internet gateway in the Amazon VPC in which your WorkSpaces are located. If you're using a Network Address Translation (NAT) gateway for outbound internet access from your VPC, or if your WorkSpaces are in public subnets and you manually assign them Elastic IP addresses, you should disable this setting. This setting applies to new WorkSpaces that you launch or to existing WorkSpaces that you rebuild. For more information, see Configure a VPC for Amazon WorkSpaces.

        " @@ -2916,8 +2955,7 @@ }, "DeleteClientBrandingResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConnectClientAddInRequest":{ "type":"structure", @@ -2938,8 +2976,7 @@ }, "DeleteConnectClientAddInResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConnectionAliasRequest":{ "type":"structure", @@ -2953,8 +2990,7 @@ }, "DeleteConnectionAliasResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteIpGroupRequest":{ "type":"structure", @@ -2968,8 +3004,7 @@ }, "DeleteIpGroupResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTagsRequest":{ "type":"structure", @@ -2990,8 +3025,7 @@ }, "DeleteTagsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWorkspaceBundleRequest":{ "type":"structure", @@ -3004,8 +3038,7 @@ }, "DeleteWorkspaceBundleResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWorkspaceImageRequest":{ "type":"structure", @@ -3019,8 +3052,7 @@ }, "DeleteWorkspaceImageResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DeployWorkspaceApplicationsRequest":{ "type":"structure", @@ -3057,8 +3089,7 @@ }, "DeregisterWorkspaceDirectoryResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAccountModificationsRequest":{ "type":"structure", @@ -3084,8 +3115,7 @@ }, "DescribeAccountRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeAccountResult":{ "type":"structure", @@ -3903,8 +3933,7 @@ }, "DisassociateConnectionAliasResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateIpGroupsRequest":{ "type":"structure", @@ -3925,8 +3954,7 @@ }, "DisassociateIpGroupsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateWorkspaceApplicationRequest":{ "type":"structure", @@ -4329,8 +4357,7 @@ }, "IncompatibleApplicationsException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The specified application is not compatible with the resource.

        ", "exception":true }, @@ -4342,6 +4369,25 @@ "documentation":"

        Unexpected server error occurred.

        ", "exception":true }, + "InternetFallbackProtocol":{ + "type":"string", + "enum":["PCOIP"] + }, + "InternetFallbackProtocolList":{ + "type":"list", + "member":{"shape":"InternetFallbackProtocol"} + }, + "InvalidParameterCombinationException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ExceptionMessage", + "documentation":"

        The exception error message.

        " + } + }, + "documentation":"

        Two or more of the selected parameter values cannot be used together.

        ", + "exception":true + }, "InvalidParameterValuesException":{ "type":"structure", "members":{ @@ -4684,8 +4730,7 @@ }, "ModifyAccountResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyCertificateBasedAuthPropertiesRequest":{ "type":"structure", @@ -4707,8 +4752,7 @@ }, "ModifyCertificateBasedAuthPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyClientPropertiesRequest":{ "type":"structure", @@ -4729,8 +4773,7 @@ }, "ModifyClientPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyEndpointEncryptionModeRequest":{ "type":"structure", @@ -4751,8 +4794,7 @@ }, "ModifyEndpointEncryptionModeResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifySamlPropertiesRequest":{ "type":"structure", @@ -4774,8 +4816,7 @@ }, "ModifySamlPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifySelfservicePermissionsRequest":{ "type":"structure", @@ -4796,8 +4837,7 @@ }, "ModifySelfservicePermissionsResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyStreamingPropertiesRequest":{ "type":"structure", @@ -4815,8 +4855,7 @@ }, "ModifyStreamingPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyWorkspaceAccessPropertiesRequest":{ "type":"structure", @@ -4837,8 +4876,7 @@ }, "ModifyWorkspaceAccessPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyWorkspaceCreationPropertiesRequest":{ "type":"structure", @@ -4859,8 +4897,7 @@ }, "ModifyWorkspaceCreationPropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyWorkspacePropertiesRequest":{ "type":"structure", @@ -4882,8 +4919,7 @@ }, "ModifyWorkspacePropertiesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "ModifyWorkspaceStateRequest":{ "type":"structure", @@ -4904,8 +4940,7 @@ }, "ModifyWorkspaceStateResult":{ "type":"structure", - "members":{ - } + "members":{} }, 
"NetworkAccessConfiguration":{ "type":"structure", @@ -4959,8 +4994,7 @@ }, "OperatingSystemNotCompatibleException":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

        The operating system of the WorkSpace is not compatible with the application.

        ", "exception":true }, @@ -5025,6 +5059,13 @@ "type":"list", "member":{"shape":"PendingCreateStandbyWorkspacesRequest"} }, + "PoolsRunningMode":{ + "type":"string", + "enum":[ + "AUTO_STOP", + "ALWAYS_ON" + ] + }, "Protocol":{ "type":"string", "enum":[ @@ -5132,10 +5173,6 @@ "shape":"SubnetIds", "documentation":"

        The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an OperationNotSupportedException error.

        " }, - "EnableWorkDocs":{ - "shape":"BooleanObject", - "documentation":"

        Indicates whether Amazon WorkDocs is enabled or disabled. If you have enabled this parameter and WorkDocs is not available in the Region, you will receive an OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again.

        " - }, "EnableSelfService":{ "shape":"BooleanObject", "documentation":"

        Indicates whether self-service capabilities are enabled or disabled.

        " @@ -5340,8 +5377,7 @@ }, "RestoreWorkspaceResult":{ "type":"structure", - "members":{ - } + "members":{} }, "RevokeIpRulesRequest":{ "type":"structure", @@ -5362,8 +5398,7 @@ }, "RevokeIpRulesResult":{ "type":"structure", - "members":{ - } + "members":{} }, "RootStorage":{ "type":"structure", @@ -5581,8 +5616,7 @@ }, "StartWorkspacesPoolResult":{ "type":"structure", - "members":{ - } + "members":{} }, "StartWorkspacesRequest":{ "type":"structure", @@ -5631,8 +5665,7 @@ }, "StopWorkspacesPoolResult":{ "type":"structure", - "members":{ - } + "members":{} }, "StopWorkspacesRequest":{ "type":"structure", @@ -5807,8 +5840,7 @@ }, "TerminateWorkspacesPoolResult":{ "type":"structure", - "members":{ - } + "members":{} }, "TerminateWorkspacesPoolSessionRequest":{ "type":"structure", @@ -5822,8 +5854,7 @@ }, "TerminateWorkspacesPoolSessionResult":{ "type":"structure", - "members":{ - } + "members":{} }, "TerminateWorkspacesRequest":{ "type":"structure", @@ -5906,8 +5937,7 @@ }, "UpdateConnectClientAddInResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateConnectionAliasPermissionRequest":{ "type":"structure", @@ -5928,8 +5958,7 @@ }, "UpdateConnectionAliasPermissionResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDescription":{ "type":"string", @@ -5970,8 +5999,7 @@ }, "UpdateRulesOfIpGroupResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateWorkspaceBundleRequest":{ "type":"structure", @@ -5988,8 +6016,7 @@ }, "UpdateWorkspaceBundleResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateWorkspaceImagePermissionRequest":{ "type":"structure", @@ -6015,8 +6042,7 @@ }, "UpdateWorkspaceImagePermissionResult":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateWorkspacesPoolRequest":{ "type":"structure", @@ -6049,6 +6075,10 @@ "TimeoutSettings":{ "shape":"TimeoutSettings", "documentation":"

        Indicates the timeout settings of the specified pool.

        " + }, + "RunningMode":{ + "shape":"PoolsRunningMode", + "documentation":"

        The desired running mode for the pool. The running mode can only be updated when the pool is in a stopped state.

        " } } }, @@ -6352,6 +6382,10 @@ "DeviceTypeWorkSpacesThinClient":{ "shape":"AccessPropertyValue", "documentation":"

        Indicates whether users can access their WorkSpaces through a WorkSpaces Thin Client.

        " + }, + "AccessEndpointConfig":{ + "shape":"AccessEndpointConfig", + "documentation":"

        Specifies the configuration for accessing the WorkSpace.

        " } }, "documentation":"

        The device types and operating systems that can be used to access a WorkSpace. For more information, see Amazon WorkSpaces Client Network Requirements.

        " @@ -6459,10 +6493,6 @@ "WorkspaceCreationProperties":{ "type":"structure", "members":{ - "EnableWorkDocs":{ - "shape":"BooleanObject", - "documentation":"

        Indicates whether Amazon WorkDocs is enabled for your WorkSpaces.

        If WorkDocs is already enabled for a WorkSpaces directory and you disable it, new WorkSpaces launched in the directory will not have WorkDocs enabled. However, WorkDocs remains enabled for any existing WorkSpaces, unless you either disable users' access to WorkDocs or you delete the WorkDocs site. To disable users' access to WorkDocs, see Disabling Users in the Amazon WorkDocs Administration Guide. To delete a WorkDocs site, see Deleting a Site in the Amazon WorkDocs Administration Guide.

        If you enable WorkDocs on a directory that already has existing WorkSpaces, the existing WorkSpaces and any new WorkSpaces that are launched in the directory will have WorkDocs enabled.

        " - }, "EnableInternetAccess":{ "shape":"BooleanObject", "documentation":"

        Indicates whether internet access is enabled for your WorkSpaces.

        " @@ -6760,7 +6790,10 @@ "MultipleUserProfiles", "StagedAppxPackage", "UnsupportedOsUpgrade", - "InsufficientRearmCount" + "InsufficientRearmCount", + "ProtocolOSIncompatibility", + "MemoryIntegrityIncompatibility", + "RestrictedDriveLetterInUse" ] }, "WorkspaceImageId":{ @@ -7021,7 +7054,8 @@ "State", "CreatedAt", "BundleId", - "DirectoryId" + "DirectoryId", + "RunningMode" ], "members":{ "PoolId":{ @@ -7038,7 +7072,7 @@ }, "PoolName":{ "shape":"WorkspacesPoolName", - "documentation":"

        The name of the pool,

        " + "documentation":"

        The name of the pool.

        " }, "Description":{ "shape":"UpdateDescription", @@ -7071,6 +7105,10 @@ "TimeoutSettings":{ "shape":"TimeoutSettings", "documentation":"

        The amount of time that a pool session remains active after users disconnect. If they try to reconnect to the pool session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new pool instance.

        " + }, + "RunningMode":{ + "shape":"PoolsRunningMode", + "documentation":"

        The running mode of the pool.

        " } }, "documentation":"

        Describes a pool of WorkSpaces.

        " diff --git a/services/workspacesinstances/pom.xml b/services/workspacesinstances/pom.xml new file mode 100644 index 000000000000..00c22c8ee22b --- /dev/null +++ b/services/workspacesinstances/pom.xml @@ -0,0 +1,60 @@ + + + 4.0.0 + + software.amazon.awssdk + services + 2.31.76-SNAPSHOT + + workspacesinstances + AWS Java SDK :: Services :: Workspaces Instances + The AWS Java SDK for Workspaces Instances module holds the client classes that are used for + communicating with Workspaces Instances. + + https://aws.amazon.com/sdkforjava + + + + org.apache.maven.plugins + maven-jar-plugin + + + + software.amazon.awssdk.services.workspacesinstances + + + + + + + + + software.amazon.awssdk + protocol-core + ${awsjavasdk.version} + + + software.amazon.awssdk + aws-json-protocol + ${awsjavasdk.version} + + + software.amazon.awssdk + http-auth-aws + ${awsjavasdk.version} + + + diff --git a/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-rule-set.json b/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-rule-set.json new file mode 100644 index 000000000000..e0d3702db0f2 --- /dev/null +++ b/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-rule-set.json @@ -0,0 +1,137 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://workspaces-instances-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://workspaces-instances.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git 
a/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-tests.json b/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-tests.json new file mode 100644 index 000000000000..bb145251951b --- /dev/null +++ b/services/workspacesinstances/src/main/resources/codegen-resources/endpoint-tests.json @@ -0,0 +1,201 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled 
and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://workspaces-instances.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://workspaces-instances.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/services/workspacesinstances/src/main/resources/codegen-resources/paginators-1.json b/services/workspacesinstances/src/main/resources/codegen-resources/paginators-1.json new file mode 100644 index 000000000000..988974d67527 --- /dev/null +++ b/services/workspacesinstances/src/main/resources/codegen-resources/paginators-1.json @@ -0,0 +1,22 @@ +{ + "pagination": { + "ListInstanceTypes": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "InstanceTypes" + }, + "ListRegions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Regions" + }, + "ListWorkspaceInstances": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "WorkspaceInstances" + } + } +} diff --git a/services/workspacesinstances/src/main/resources/codegen-resources/service-2.json b/services/workspacesinstances/src/main/resources/codegen-resources/service-2.json new file mode 100644 index 000000000000..0b6c36125e50 --- /dev/null +++ 
b/services/workspacesinstances/src/main/resources/codegen-resources/service-2.json @@ -0,0 +1,1945 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2022-07-26", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"workspaces-instances", + "jsonVersion":"1.0", + "protocol":"json", + "protocols":["json"], + "serviceFullName":"Amazon Workspaces Instances", + "serviceId":"Workspaces Instances", + "signatureVersion":"v4", + "signingName":"workspaces-instances", + "targetPrefix":"EUCMIFrontendAPIService", + "uid":"workspaces-instances-2022-07-26" + }, + "operations":{ + "AssociateVolume":{ + "name":"AssociateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateVolumeRequest"}, + "output":{"shape":"AssociateVolumeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Attaches a volume to a WorkSpace Instance.

        " + }, + "CreateVolume":{ + "name":"CreateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateVolumeRequest"}, + "output":{"shape":"CreateVolumeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Creates a new volume for WorkSpace Instances.

        ", + "idempotent":true + }, + "CreateWorkspaceInstance":{ + "name":"CreateWorkspaceInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateWorkspaceInstanceRequest"}, + "output":{"shape":"CreateWorkspaceInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Launches a new WorkSpace Instance with specified configuration parameters, enabling programmatic workspace deployment.

        ", + "idempotent":true + }, + "DeleteVolume":{ + "name":"DeleteVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteVolumeRequest"}, + "output":{"shape":"DeleteVolumeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes a specified volume.

        " + }, + "DeleteWorkspaceInstance":{ + "name":"DeleteWorkspaceInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteWorkspaceInstanceRequest"}, + "output":{"shape":"DeleteWorkspaceInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Deletes the specified WorkSpace.

        " + }, + "DisassociateVolume":{ + "name":"DisassociateVolume", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateVolumeRequest"}, + "output":{"shape":"DisassociateVolumeResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

        Detaches a volume from a WorkSpace Instance.

        " + }, + "GetWorkspaceInstance":{ + "name":"GetWorkspaceInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetWorkspaceInstanceRequest"}, + "output":{"shape":"GetWorkspaceInstanceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves detailed information about a specific WorkSpace Instance.

        " + }, + "ListInstanceTypes":{ + "name":"ListInstanceTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInstanceTypesRequest"}, + "output":{"shape":"ListInstanceTypesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves a list of instance types supported by Amazon WorkSpaces Instances, enabling precise workspace infrastructure configuration.

        " + }, + "ListRegions":{ + "name":"ListRegions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRegionsRequest"}, + "output":{"shape":"ListRegionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves a list of AWS regions supported by Amazon WorkSpaces Instances, enabling region discovery for workspace deployments.

        " + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves tags for a WorkSpace Instance.

        " + }, + "ListWorkspaceInstances":{ + "name":"ListWorkspaceInstances", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListWorkspaceInstancesRequest"}, + "output":{"shape":"ListWorkspaceInstancesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Retrieves a collection of WorkSpaces Instances based on specified filters.

        " + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Adds tags to a WorkSpace Instance.

        " + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

        Removes tags from a WorkSpace Instance.

        " + } + }, + "shapes":{ + "ARN":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:.*" + }, + "AccessDeniedException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Detailed explanation of the access denial.

        " + } + }, + "documentation":"

        Indicates insufficient permissions to perform the requested action.

        ", + "exception":true + }, + "AmdSevSnpEnum":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "AssociateVolumeRequest":{ + "type":"structure", + "required":[ + "WorkspaceInstanceId", + "VolumeId", + "Device" + ], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        WorkSpace Instance to attach volume to.

        " + }, + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

        Volume to be attached.

        " + }, + "Device":{ + "shape":"DeviceName", + "documentation":"

        Device path for volume attachment.

        " + } + }, + "documentation":"

        Specifies volume attachment parameters.

        " + }, + "AssociateVolumeResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms volume attachment.

        " + }, + "AutoRecoveryEnum":{ + "type":"string", + "enum":[ + "disabled", + "default" + ] + }, + "AvailabilityZone":{ + "type":"string", + "pattern":"[a-z]{2}-[a-z]+-\\d[a-z](-[a-z0-9]+)?" + }, + "BandwidthWeightingEnum":{ + "type":"string", + "enum":[ + "default", + "vpc-1", + "ebs-1" + ] + }, + "BlockDeviceMappingRequest":{ + "type":"structure", + "members":{ + "DeviceName":{ + "shape":"DeviceName", + "documentation":"

        Name of the device for storage mapping.

        " + }, + "Ebs":{ + "shape":"EbsBlockDevice", + "documentation":"

        EBS volume configuration for the device.

        " + }, + "NoDevice":{ + "shape":"DeviceName", + "documentation":"

        Indicates device should not be mapped.

        " + }, + "VirtualName":{ + "shape":"VirtualName", + "documentation":"

        Virtual device name for ephemeral storage.

        " + } + }, + "documentation":"

        Defines device mapping for WorkSpace Instance storage.

        " + }, + "BlockDeviceMappings":{ + "type":"list", + "member":{"shape":"BlockDeviceMappingRequest"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CapacityReservationPreferenceEnum":{ + "type":"string", + "enum":[ + "capacity-reservations-only", + "open", + "none" + ] + }, + "CapacityReservationSpecification":{ + "type":"structure", + "members":{ + "CapacityReservationPreference":{ + "shape":"CapacityReservationPreferenceEnum", + "documentation":"

        Preference for using capacity reservation.

        " + }, + "CapacityReservationTarget":{ + "shape":"CapacityReservationTarget", + "documentation":"

        Specific capacity reservation target.

        " + } + }, + "documentation":"

        Specifies capacity reservation preferences.

        " + }, + "CapacityReservationTarget":{ + "type":"structure", + "members":{ + "CapacityReservationId":{ + "shape":"String128", + "documentation":"

        Unique identifier for the capacity reservation.

        " + }, + "CapacityReservationResourceGroupArn":{ + "shape":"ARN", + "documentation":"

        ARN of the capacity reservation resource group.

        " + } + }, + "documentation":"

        Identifies a specific capacity reservation.

        " + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "sensitive":true + }, + "ConflictException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Description of the conflict encountered.

        " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

        Identifier of the conflicting resource.

        " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

        Type of the conflicting resource.

        " + } + }, + "documentation":"

        Signals a conflict with the current state of the resource.

        ", + "exception":true + }, + "ConnectionTrackingSpecificationRequest":{ + "type":"structure", + "members":{ + "TcpEstablishedTimeout":{ + "shape":"NonNegativeInteger", + "documentation":"

        Timeout for established TCP connections.

        " + }, + "UdpStreamTimeout":{ + "shape":"NonNegativeInteger", + "documentation":"

        Timeout for UDP stream connections.

        " + }, + "UdpTimeout":{ + "shape":"NonNegativeInteger", + "documentation":"

        General timeout for UDP connections.

        " + } + }, + "documentation":"

        Defines connection tracking parameters for network interfaces.

        " + }, + "CpuCreditsEnum":{ + "type":"string", + "enum":[ + "standard", + "unlimited" + ] + }, + "CpuOptionsRequest":{ + "type":"structure", + "members":{ + "AmdSevSnp":{ + "shape":"AmdSevSnpEnum", + "documentation":"

        AMD Secure Encrypted Virtualization configuration.

        " + }, + "CoreCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of CPU cores to allocate.

        " + }, + "ThreadsPerCore":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of threads per CPU core.

        " + } + }, + "documentation":"

        Configures CPU-specific settings for WorkSpace Instance.

        " + }, + "CreateVolumeRequest":{ + "type":"structure", + "required":["AvailabilityZone"], + "members":{ + "AvailabilityZone":{ + "shape":"String64", + "documentation":"

        Availability zone for the volume.

        " + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        Unique token to prevent duplicate volume creation.

        ", + "idempotencyToken":true + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

        Indicates if the volume should be encrypted.

        " + }, + "Iops":{ + "shape":"NonNegativeInteger", + "documentation":"

        Input/output operations per second for the volume.

        " + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

        KMS key for volume encryption.

        " + }, + "SizeInGB":{ + "shape":"NonNegativeInteger", + "documentation":"

        Volume size in gigabytes.

        " + }, + "SnapshotId":{ + "shape":"SnapshotId", + "documentation":"

        Source snapshot for volume creation.

        " + }, + "TagSpecifications":{ + "shape":"TagSpecifications", + "documentation":"

        Metadata tags for the volume.

        " + }, + "Throughput":{ + "shape":"NonNegativeInteger", + "documentation":"

        Volume throughput performance.

        " + }, + "VolumeType":{ + "shape":"VolumeTypeEnum", + "documentation":"

        Type of EBS volume.

        " + } + }, + "documentation":"

        Specifies volume creation parameters.

        " + }, + "CreateVolumeResponse":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

        Unique identifier for the new volume.

        " + } + }, + "documentation":"

        Returns the created volume identifier.

        " + }, + "CreateWorkspaceInstanceRequest":{ + "type":"structure", + "required":["ManagedInstance"], + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

        Unique token to ensure idempotent instance creation, preventing duplicate workspace launches.

        ", + "idempotencyToken":true + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        Optional metadata tags for categorizing and managing WorkSpaces Instances.

        " + }, + "ManagedInstance":{ + "shape":"ManagedInstanceRequest", + "documentation":"

        Comprehensive configuration settings for the WorkSpaces Instance, including network, compute, and storage parameters.

        " + } + }, + "documentation":"

        Defines the configuration parameters for creating a new WorkSpaces Instance.

        " + }, + "CreateWorkspaceInstanceResponse":{ + "type":"structure", + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier assigned to the newly created WorkSpaces Instance.

        " + } + }, + "documentation":"

        Returns the unique identifier for the newly created WorkSpaces Instance.

        " + }, + "CreditSpecificationRequest":{ + "type":"structure", + "members":{ + "CpuCredits":{ + "shape":"CpuCreditsEnum", + "documentation":"

        CPU credit specification mode.

        " + } + }, + "documentation":"

        Defines CPU credit configuration for burstable instances.

        " + }, + "DeleteVolumeRequest":{ + "type":"structure", + "required":["VolumeId"], + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

        Identifier of the volume to delete.

        " + } + }, + "documentation":"

        Specifies the volume to delete.

        " + }, + "DeleteVolumeResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms volume deletion.

        " + }, + "DeleteWorkspaceInstanceRequest":{ + "type":"structure", + "required":["WorkspaceInstanceId"], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the WorkSpaces Instance targeted for deletion.

        " + } + }, + "documentation":"

        The WorkSpace to delete

        " + }, + "DeleteWorkspaceInstanceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms the successful deletion of the specified WorkSpace Instance.

        " + }, + "Description":{ + "type":"string", + "max":1000, + "min":0, + "pattern":"[\\S\\s]*" + }, + "DeviceName":{ + "type":"string", + "max":32, + "min":0 + }, + "DisassociateModeEnum":{ + "type":"string", + "enum":[ + "FORCE", + "NO_FORCE" + ] + }, + "DisassociateVolumeRequest":{ + "type":"structure", + "required":[ + "WorkspaceInstanceId", + "VolumeId" + ], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        WorkSpace Instance to detach volume from.

        " + }, + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

        Volume to be detached.

        " + }, + "Device":{ + "shape":"DeviceName", + "documentation":"

        Device path of volume to detach.

        " + }, + "DisassociateMode":{ + "shape":"DisassociateModeEnum", + "documentation":"

        Mode for volume detachment.

        " + } + }, + "documentation":"

        Specifies volume detachment parameters.

        " + }, + "DisassociateVolumeResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms volume detachment.

        " + }, + "EC2InstanceError":{ + "type":"structure", + "members":{ + "EC2ErrorCode":{ + "shape":"String", + "documentation":"

        Unique error code identifying the specific EC2 instance error.

        " + }, + "EC2ExceptionType":{ + "shape":"String", + "documentation":"

        Type of exception encountered during EC2 instance operation.

        " + }, + "EC2ErrorMessage":{ + "shape":"String", + "documentation":"

        Detailed description of the EC2 instance error.

        " + } + }, + "documentation":"

        Captures detailed error information for EC2 instance operations.

        " + }, + "EC2InstanceErrors":{ + "type":"list", + "member":{"shape":"EC2InstanceError"} + }, + "EC2ManagedInstance":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "documentation":"

        Unique identifier of the managed EC2 instance.

        " + } + }, + "documentation":"

        Represents an EC2 instance managed by WorkSpaces.

        " + }, + "EbsBlockDevice":{ + "type":"structure", + "members":{ + "VolumeType":{ + "shape":"VolumeTypeEnum", + "documentation":"

        Type of EBS volume (e.g., gp2, io1).

        " + }, + "Encrypted":{ + "shape":"Boolean", + "documentation":"

        Indicates if the volume is encrypted.

        " + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

        KMS key used for volume encryption.

        " + }, + "Iops":{ + "shape":"NonNegativeInteger", + "documentation":"

        Input/output operations per second for the volume.

        " + }, + "Throughput":{ + "shape":"NonNegativeInteger", + "documentation":"

        Volume data transfer rate.

        " + }, + "VolumeSize":{ + "shape":"NonNegativeInteger", + "documentation":"

        Size of the EBS volume in gigabytes.

        " + } + }, + "documentation":"

        Defines configuration for an Elastic Block Store volume.

        " + }, + "EnaSrdSpecificationRequest":{ + "type":"structure", + "members":{ + "EnaSrdEnabled":{ + "shape":"Boolean", + "documentation":"

        Enables or disables ENA SRD for network performance.

        " + }, + "EnaSrdUdpSpecification":{ + "shape":"EnaSrdUdpSpecificationRequest", + "documentation":"

        Configures UDP-specific ENA SRD settings.

        " + } + }, + "documentation":"

        Defines Elastic Network Adapter (ENA) Scalable Reliable Datagram (SRD) configuration.

        " + }, + "EnaSrdUdpSpecificationRequest":{ + "type":"structure", + "members":{ + "EnaSrdUdpEnabled":{ + "shape":"Boolean", + "documentation":"

        Enables or disables ENA SRD for UDP traffic.

        " + } + }, + "documentation":"

        Specifies UDP configuration for ENA SRD.

        " + }, + "EnclaveOptionsRequest":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

        Enables or disables AWS Nitro Enclaves for enhanced security.

        " + } + }, + "documentation":"

        Configures AWS Nitro Enclave options for the WorkSpace Instance.

        " + }, + "GetWorkspaceInstanceRequest":{ + "type":"structure", + "required":["WorkspaceInstanceId"], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the WorkSpace Instance to retrieve.

        " + } + }, + "documentation":"

        Identifies the WorkSpaces Instance to retrieve detailed information for.

        " + }, + "GetWorkspaceInstanceResponse":{ + "type":"structure", + "members":{ + "WorkspaceInstanceErrors":{ + "shape":"WorkspaceInstanceErrors", + "documentation":"

        Captures any errors specific to the WorkSpace Instance lifecycle.

        " + }, + "EC2InstanceErrors":{ + "shape":"EC2InstanceErrors", + "documentation":"

        Includes any underlying EC2 instance errors encountered.

        " + }, + "ProvisionState":{ + "shape":"ProvisionStateEnum", + "documentation":"

        Current provisioning state of the WorkSpaces Instance.

        " + }, + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the retrieved WorkSpaces Instance.

        " + }, + "EC2ManagedInstance":{ + "shape":"EC2ManagedInstance", + "documentation":"

        Details of the associated EC2 managed instance.

        " + } + }, + "documentation":"

        Provides comprehensive details about the requested WorkSpaces Instance.

        " + }, + "HibernationOptionsRequest":{ + "type":"structure", + "members":{ + "Configured":{ + "shape":"Boolean", + "documentation":"

        Enables or disables instance hibernation capability.

        " + } + }, + "documentation":"

        Defines hibernation configuration for the WorkSpace Instance.

        " + }, + "HostId":{ + "type":"string", + "pattern":"h-[0-9a-zA-Z]{1,63}" + }, + "HostnameTypeEnum":{ + "type":"string", + "enum":[ + "ip-name", + "resource-name" + ] + }, + "HttpEndpointEnum":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "HttpProtocolIpv6Enum":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "HttpPutResponseHopLimit":{ + "type":"integer", + "box":true, + "max":64, + "min":1 + }, + "HttpTokensEnum":{ + "type":"string", + "enum":[ + "optional", + "required" + ] + }, + "IamInstanceProfileSpecification":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ARN", + "documentation":"

        Amazon Resource Name (ARN) of the IAM instance profile.

        " + }, + "Name":{ + "shape":"String64", + "documentation":"

        Name of the IAM instance profile.

        " + } + }, + "documentation":"

        Defines IAM instance profile configuration for WorkSpace Instance.

        " + }, + "ImageId":{ + "type":"string", + "pattern":"ami-[0-9a-zA-Z]{1,63}" + }, + "InstanceInterruptionBehaviorEnum":{ + "type":"string", + "enum":[ + "hibernate", + "stop" + ] + }, + "InstanceIpv6Address":{ + "type":"structure", + "members":{ + "Ipv6Address":{ + "shape":"Ipv6Address", + "documentation":"

        Specific IPv6 address assigned to the instance.

        " + }, + "IsPrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

        Indicates if this is the primary IPv6 address for the instance.

        " + } + }, + "documentation":"

        Represents an IPv6 address configuration for a WorkSpace Instance.

        " + }, + "InstanceMaintenanceOptionsRequest":{ + "type":"structure", + "members":{ + "AutoRecovery":{ + "shape":"AutoRecoveryEnum", + "documentation":"

        Enables or disables automatic instance recovery.

        " + } + }, + "documentation":"

        Configures automatic maintenance settings for WorkSpace Instance.

        " + }, + "InstanceMarketOptionsRequest":{ + "type":"structure", + "members":{ + "MarketType":{ + "shape":"MarketTypeEnum", + "documentation":"

        Specifies the type of marketplace for instance deployment.

        " + }, + "SpotOptions":{ + "shape":"SpotMarketOptions", + "documentation":"

        Configuration options for spot instance deployment.

        " + } + }, + "documentation":"

        Configures marketplace-specific instance deployment options.

        " + }, + "InstanceMetadataOptionsRequest":{ + "type":"structure", + "members":{ + "HttpEndpoint":{ + "shape":"HttpEndpointEnum", + "documentation":"

        Enables or disables HTTP endpoint for instance metadata.

        " + }, + "HttpProtocolIpv6":{ + "shape":"HttpProtocolIpv6Enum", + "documentation":"

        Configures IPv6 support for instance metadata HTTP protocol.

        " + }, + "HttpPutResponseHopLimit":{ + "shape":"HttpPutResponseHopLimit", + "documentation":"

        Sets maximum number of network hops for metadata PUT responses.

        " + }, + "HttpTokens":{ + "shape":"HttpTokensEnum", + "documentation":"

        Configures token requirement for instance metadata retrieval.

        " + }, + "InstanceMetadataTags":{ + "shape":"InstanceMetadataTagsEnum", + "documentation":"

        Enables or disables instance metadata tags retrieval.

        " + } + }, + "documentation":"

        Defines instance metadata service configuration.

        " + }, + "InstanceMetadataTagsEnum":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "InstanceNetworkInterfaceSpecification":{ + "type":"structure", + "members":{ + "AssociateCarrierIpAddress":{ + "shape":"Boolean", + "documentation":"

        Enables carrier IP address association.

        " + }, + "AssociatePublicIpAddress":{ + "shape":"Boolean", + "documentation":"

        Enables public IP address assignment.

        " + }, + "ConnectionTrackingSpecification":{ + "shape":"ConnectionTrackingSpecificationRequest", + "documentation":"

        Configures network connection tracking parameters.

        " + }, + "Description":{ + "shape":"Description", + "documentation":"

        Descriptive text for the network interface.

        " + }, + "DeviceIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

        Unique index for the network interface.

        " + }, + "EnaSrdSpecification":{ + "shape":"EnaSrdSpecificationRequest", + "documentation":"

        Configures Elastic Network Adapter Scalable Reliable Datagram settings.

        " + }, + "InterfaceType":{ + "shape":"InterfaceTypeEnum", + "documentation":"

        Specifies the type of network interface.

        " + }, + "Ipv4Prefixes":{ + "shape":"Ipv4Prefixes", + "documentation":"

        IPv4 prefix configurations for the interface.

        " + }, + "Ipv4PrefixCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of IPv4 prefixes to assign.

        " + }, + "Ipv6AddressCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of IPv6 addresses to assign.

        " + }, + "Ipv6Addresses":{ + "shape":"Ipv6Addresses", + "documentation":"

        Specific IPv6 addresses for the interface.

        " + }, + "Ipv6Prefixes":{ + "shape":"Ipv6Prefixes", + "documentation":"

        IPv6 prefix configurations for the interface.

        " + }, + "Ipv6PrefixCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of IPv6 prefixes to assign.

        " + }, + "NetworkCardIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

        Index of the network card for multiple network interfaces.

        " + }, + "NetworkInterfaceId":{ + "shape":"NetworkInterfaceId", + "documentation":"

        Unique identifier for the network interface.

        " + }, + "PrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

        Indicates the primary IPv6 configuration.

        " + }, + "PrivateIpAddress":{ + "shape":"Ipv4Address", + "documentation":"

        Primary private IP address for the interface.

        " + }, + "PrivateIpAddresses":{ + "shape":"PrivateIpAddresses", + "documentation":"

        List of private IP addresses for the interface.

        " + }, + "SecondaryPrivateIpAddressCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Number of additional private IP addresses to assign.

        " + }, + "Groups":{ + "shape":"SecurityGroupIds", + "documentation":"

        Security groups associated with the network interface.

        " + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

        Subnet identifier for the network interface.

        " + } + }, + "documentation":"

        Defines network interface configuration for WorkSpace Instance.

        " + }, + "InstanceNetworkPerformanceOptionsRequest":{ + "type":"structure", + "members":{ + "BandwidthWeighting":{ + "shape":"BandwidthWeightingEnum", + "documentation":"

        Defines bandwidth allocation strategy for network interfaces.

        " + } + }, + "documentation":"

        Configures network performance settings for WorkSpace Instance.

        " + }, + "InstanceType":{ + "type":"string", + "pattern":"([a-z0-9-]+)\\.([a-z0-9]+)" + }, + "InstanceTypeInfo":{ + "type":"structure", + "members":{ + "InstanceType":{ + "shape":"InstanceType", + "documentation":"

        Unique identifier for the WorkSpace Instance type.

        " + } + }, + "documentation":"

        Provides details about a specific WorkSpace Instance type.

        " + }, + "InstanceTypes":{ + "type":"list", + "member":{"shape":"InstanceTypeInfo"} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InterfaceTypeEnum":{ + "type":"string", + "enum":[ + "interface", + "efa", + "efa-only" + ] + }, + "InternalServerException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Description of the internal server error.

        " + }, + "RetryAfterSeconds":{ + "shape":"Integer", + "documentation":"

        Recommended wait time before retrying the request.

        " + } + }, + "documentation":"

        Indicates an unexpected server-side error occurred.

        ", + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "Ipv4Address":{ + "type":"string", + "pattern":"(\\b25[0-5]|\\b2[0-4][0-9]|\\b[01]?[0-9][0-9]?)(\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}", + "sensitive":true + }, + "Ipv4Prefix":{ + "type":"string", + "pattern":".*(?:(?:\\d|[01]?\\d\\d|2[0-4]\\d|25[0-5])\\.){3}(?:25[0-5]|2[0-4]\\d|[01]?\\d\\d|\\d)(?:/\\d{1,2})?.*" + }, + "Ipv4PrefixSpecificationRequest":{ + "type":"structure", + "members":{ + "Ipv4Prefix":{ + "shape":"Ipv4Prefix", + "documentation":"

        Specific IPv4 prefix for network interface configuration.

        " + } + }, + "documentation":"

        Specifies IPv4 prefix configuration for network interfaces.

        " + }, + "Ipv4Prefixes":{ + "type":"list", + "member":{"shape":"Ipv4PrefixSpecificationRequest"} + }, + "Ipv6Address":{ + "type":"string", + "max":128, + "min":0, + "sensitive":true + }, + "Ipv6Addresses":{ + "type":"list", + "member":{"shape":"InstanceIpv6Address"} + }, + "Ipv6Prefix":{ + "type":"string", + "pattern":"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]{1,2}|1[01][0-9]|12[0-8])" + }, + "Ipv6PrefixSpecificationRequest":{ + "type":"structure", + "members":{ + "Ipv6Prefix":{ + "shape":"Ipv6Prefix", + "documentation":"

        Specific IPv6 prefix for network interface configuration.

        " + } + }, + "documentation":"

        Specifies IPv6 prefix configuration for network interfaces.

        " + }, + "Ipv6Prefixes":{ + "type":"list", + "member":{"shape":"Ipv6PrefixSpecificationRequest"} + }, + "KmsKeyId":{ + "type":"string", + "max":128, + "min":0, + "sensitive":true + }, + "LicenseConfigurationRequest":{ + "type":"structure", + "members":{ + "LicenseConfigurationArn":{ + "shape":"ARN", + "documentation":"

        ARN of the license configuration for the WorkSpace Instance.

        " + } + }, + "documentation":"

        Specifies license configuration for WorkSpace Instance.

        " + }, + "LicenseSpecifications":{ + "type":"list", + "member":{"shape":"LicenseConfigurationRequest"} + }, + "ListInstanceTypesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        Maximum number of instance types to return in a single API call. Enables pagination of instance type results.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Pagination token for retrieving subsequent pages of instance type results.

        " + } + }, + "documentation":"

        Defines input parameters for retrieving supported WorkSpaces Instances instance types.

        " + }, + "ListInstanceTypesResponse":{ + "type":"structure", + "required":["InstanceTypes"], + "members":{ + "InstanceTypes":{ + "shape":"InstanceTypes", + "documentation":"

        Collection of supported instance types for WorkSpaces Instances.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Token for retrieving additional instance types if the result set is paginated.

        " + } + }, + "documentation":"

        Contains the list of instance types supported by WorkSpaces Instances.

        " + }, + "ListRegionsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        Maximum number of regions to return in a single API call. Enables pagination of region results.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Pagination token for retrieving subsequent pages of region results.

        " + } + }, + "documentation":"

        Defines input parameters for retrieving supported WorkSpaces Instances regions.

        " + }, + "ListRegionsResponse":{ + "type":"structure", + "required":["Regions"], + "members":{ + "Regions":{ + "shape":"RegionList", + "documentation":"

        Collection of AWS regions supported by WorkSpaces Instances.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Token for retrieving additional regions if the result set is paginated.

        " + } + }, + "documentation":"

        Contains the list of supported AWS regions for WorkSpaces Instances.

        " + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["WorkspaceInstanceId"], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the WorkSpace Instance.

        " + } + }, + "documentation":"

        Specifies the WorkSpace Instance to retrieve tags for.

        " + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

        Collection of tags associated with the WorkSpace Instance.

        " + } + }, + "documentation":"

        Returns the list of tags for the specified WorkSpace Instance.

        " + }, + "ListWorkspaceInstancesRequest":{ + "type":"structure", + "members":{ + "ProvisionStates":{ + "shape":"ProvisionStates", + "documentation":"

        Filter WorkSpaces Instances by their current provisioning states.

        " + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

        Maximum number of WorkSpaces Instances to return in a single response.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Pagination token for retrieving subsequent pages of WorkSpaces Instances.

        " + } + }, + "documentation":"

        Defines filters and pagination parameters for retrieving WorkSpaces Instances.

        " + }, + "ListWorkspaceInstancesResponse":{ + "type":"structure", + "required":["WorkspaceInstances"], + "members":{ + "WorkspaceInstances":{ + "shape":"WorkspaceInstances", + "documentation":"

        Collection of WorkSpaces Instances returned by the query.

        " + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

        Token for retrieving additional WorkSpaces Instances if the result set is paginated.

        " + } + }, + "documentation":"

        Contains the list of WorkSpaces Instances matching the specified criteria.

        " + }, + "ManagedInstanceRequest":{ + "type":"structure", + "members":{ + "BlockDeviceMappings":{ + "shape":"BlockDeviceMappings", + "documentation":"

        Configures block device mappings for storage.

        " + }, + "CapacityReservationSpecification":{ + "shape":"CapacityReservationSpecification", + "documentation":"

        Specifies capacity reservation preferences.

        " + }, + "CpuOptions":{ + "shape":"CpuOptionsRequest", + "documentation":"

        Configures CPU-specific settings.

        " + }, + "CreditSpecification":{ + "shape":"CreditSpecificationRequest", + "documentation":"

        Defines CPU credit configuration for burstable instances.

        " + }, + "DisableApiStop":{ + "shape":"Boolean", + "documentation":"

        Prevents API-initiated instance stop.

        " + }, + "EbsOptimized":{ + "shape":"Boolean", + "documentation":"

        Enables optimized EBS performance.

        " + }, + "EnablePrimaryIpv6":{ + "shape":"Boolean", + "documentation":"

        Enables primary IPv6 address configuration.

        " + }, + "EnclaveOptions":{ + "shape":"EnclaveOptionsRequest", + "documentation":"

        Configures AWS Nitro Enclave settings.

        " + }, + "HibernationOptions":{ + "shape":"HibernationOptionsRequest", + "documentation":"

        Configures instance hibernation capabilities.

        " + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileSpecification", + "documentation":"

        Specifies IAM instance profile configuration.

        " + }, + "ImageId":{ + "shape":"ImageId", + "documentation":"

        Identifies the Amazon Machine Image (AMI) for the instance.

        " + }, + "InstanceMarketOptions":{ + "shape":"InstanceMarketOptionsRequest", + "documentation":"

        Configures marketplace-specific deployment options.

        " + }, + "InstanceType":{ + "shape":"InstanceType", + "documentation":"

        Specifies the WorkSpace Instance type.

        " + }, + "Ipv6Addresses":{ + "shape":"Ipv6Addresses", + "documentation":"

        Configures specific IPv6 addresses.

        " + }, + "Ipv6AddressCount":{ + "shape":"NonNegativeInteger", + "documentation":"

        Specifies number of IPv6 addresses to assign.

        " + }, + "KernelId":{ + "shape":"String128", + "documentation":"

        Identifies the kernel for the instance.

        " + }, + "KeyName":{ + "shape":"String64", + "documentation":"

        Specifies the key pair for instance access.

        " + }, + "LicenseSpecifications":{ + "shape":"LicenseSpecifications", + "documentation":"

        Configures license-related settings.

        " + }, + "MaintenanceOptions":{ + "shape":"InstanceMaintenanceOptionsRequest", + "documentation":"

        Defines automatic maintenance settings.

        " + }, + "MetadataOptions":{ + "shape":"InstanceMetadataOptionsRequest", + "documentation":"

        Configures instance metadata service settings.

        " + }, + "Monitoring":{ + "shape":"RunInstancesMonitoringEnabled", + "documentation":"

        Enables or disables detailed instance monitoring.

        " + }, + "NetworkInterfaces":{ + "shape":"NetworkInterfaces", + "documentation":"

        Configures network interface settings.

        " + }, + "NetworkPerformanceOptions":{ + "shape":"InstanceNetworkPerformanceOptionsRequest", + "documentation":"

        Defines network performance configuration.

        " + }, + "Placement":{ + "shape":"Placement", + "documentation":"

        Specifies instance placement preferences.

        " + }, + "PrivateDnsNameOptions":{ + "shape":"PrivateDnsNameOptionsRequest", + "documentation":"

        Configures private DNS name settings.

        " + }, + "PrivateIpAddress":{ + "shape":"Ipv4Address", + "documentation":"

        Specifies the primary private IP address.

        " + }, + "RamdiskId":{ + "shape":"String128", + "documentation":"

        Identifies the ramdisk for the instance.

        " + }, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

        Specifies security group identifiers.

        " + }, + "SecurityGroups":{ + "shape":"SecurityGroupNames", + "documentation":"

        Configures security group settings.

        " + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

        Identifies the subnet for the instance.

        " + }, + "TagSpecifications":{ + "shape":"TagSpecifications", + "documentation":"

        Configures resource tagging specifications.

        " + }, + "UserData":{ + "shape":"UserData", + "documentation":"

        Provides custom initialization data for the instance.

        " + } + }, + "documentation":"

        Defines comprehensive configuration for a managed WorkSpace Instance.

        " + }, + "MarketTypeEnum":{ + "type":"string", + "enum":[ + "spot", + "capacity-block" + ] + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "NetworkInterfaceId":{ + "type":"string", + "pattern":"eni-[0-9a-zA-Z]{1,63}" + }, + "NetworkInterfaces":{ + "type":"list", + "member":{"shape":"InstanceNetworkInterfaceSpecification"} + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "sensitive":true + }, + "NonNegativeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "Placement":{ + "type":"structure", + "members":{ + "Affinity":{ + "shape":"String64", + "documentation":"

        Specifies host affinity for dedicated instances.

        " + }, + "AvailabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

        Identifies the specific AWS availability zone.

        " + }, + "GroupId":{ + "shape":"PlacementGroupId", + "documentation":"

        Unique identifier for placement group.

        " + }, + "GroupName":{ + "shape":"String64", + "documentation":"

        Name of the placement group.

        " + }, + "HostId":{ + "shape":"HostId", + "documentation":"

        Identifies the specific dedicated host.

        " + }, + "HostResourceGroupArn":{ + "shape":"ARN", + "documentation":"

        ARN of the host resource group.

        " + }, + "PartitionNumber":{ + "shape":"NonNegativeInteger", + "documentation":"

        Specifies partition number for partition placement groups.

        " + }, + "Tenancy":{ + "shape":"TenancyEnum", + "documentation":"

        Defines instance tenancy configuration.

        " + } + }, + "documentation":"

        Defines instance placement configuration for WorkSpace Instance.

        " + }, + "PlacementGroupId":{ + "type":"string", + "pattern":"pg-[0-9a-zA-Z]{1,63}" + }, + "PrivateDnsNameOptionsRequest":{ + "type":"structure", + "members":{ + "HostnameType":{ + "shape":"HostnameTypeEnum", + "documentation":"

        Specifies the type of hostname configuration.

        " + }, + "EnableResourceNameDnsARecord":{ + "shape":"Boolean", + "documentation":"

        Enables DNS A record for resource name resolution.

        " + }, + "EnableResourceNameDnsAAAARecord":{ + "shape":"Boolean", + "documentation":"

        Enables DNS AAAA record for resource name resolution.

        " + } + }, + "documentation":"

        Configures private DNS name settings for WorkSpace Instance.

        " + }, + "PrivateIpAddressSpecification":{ + "type":"structure", + "members":{ + "Primary":{ + "shape":"Boolean", + "documentation":"

        Indicates if this is the primary private IP address.

        " + }, + "PrivateIpAddress":{ + "shape":"Ipv4Address", + "documentation":"

        Specific private IP address for the network interface.

        " + } + }, + "documentation":"

        Defines private IP address configuration for network interface.

        " + }, + "PrivateIpAddresses":{ + "type":"list", + "member":{"shape":"PrivateIpAddressSpecification"} + }, + "ProvisionStateEnum":{ + "type":"string", + "enum":[ + "ALLOCATING", + "ALLOCATED", + "DEALLOCATING", + "DEALLOCATED", + "ERROR_ALLOCATING", + "ERROR_DEALLOCATING" + ] + }, + "ProvisionStates":{ + "type":"list", + "member":{"shape":"ProvisionStateEnum"} + }, + "Region":{ + "type":"structure", + "members":{ + "RegionName":{ + "shape":"RegionName", + "documentation":"

        Name of the AWS region.

        " + } + }, + "documentation":"

        Represents an AWS region supported by WorkSpaces Instances.

        " + }, + "RegionList":{ + "type":"list", + "member":{"shape":"Region"} + }, + "RegionName":{ + "type":"string", + "pattern":"[-0-9a-z]{1,31}" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Details about the missing resource.

        " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

        Identifier of the resource that was not found.

        " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

        Type of the resource that was not found.

        " + } + }, + "documentation":"

        Indicates the requested resource could not be found.

        ", + "exception":true + }, + "ResourceTypeEnum":{ + "type":"string", + "enum":[ + "instance", + "volume", + "spot-instances-request", + "network-interface" + ] + }, + "RunInstancesMonitoringEnabled":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

        Enables or disables detailed instance monitoring.

        " + } + }, + "documentation":"

        Configures detailed monitoring for WorkSpace Instance.

        " + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"sg-[0-9a-zA-Z]{1,63}" + }, + "SecurityGroupIds":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, + "SecurityGroupName":{ + "type":"string", + "pattern":"(?!sg-)[\\w .:/()#,@\\[\\]+=&;{}!$*-]{0,255}" + }, + "SecurityGroupNames":{ + "type":"list", + "member":{"shape":"SecurityGroupName"} + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "Message", + "ResourceId", + "ResourceType", + "ServiceCode", + "QuotaCode" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Description of the quota limitation.

        " + }, + "ResourceId":{ + "shape":"String", + "documentation":"

        Identifier of the resource related to the quota.

        " + }, + "ResourceType":{ + "shape":"String", + "documentation":"

        Type of resource related to the quota.

        " + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

        Code identifying the service with the quota limitation.

        " + }, + "QuotaCode":{ + "shape":"String", + "documentation":"

        Specific code for the exceeded quota.

        " + } + }, + "documentation":"

        Indicates that a service quota has been exceeded.

        ", + "exception":true + }, + "SnapshotId":{ + "type":"string", + "pattern":"snap-[0-9a-zA-Z]{1,63}" + }, + "SpotInstanceTypeEnum":{ + "type":"string", + "enum":[ + "one-time", + "persistent" + ] + }, + "SpotMarketOptions":{ + "type":"structure", + "members":{ + "BlockDurationMinutes":{ + "shape":"NonNegativeInteger", + "documentation":"

        Duration of spot instance block reservation.

        " + }, + "InstanceInterruptionBehavior":{ + "shape":"InstanceInterruptionBehaviorEnum", + "documentation":"

        Specifies behavior when spot instance is interrupted.

        " + }, + "MaxPrice":{ + "shape":"String64", + "documentation":"

        Maximum hourly price for spot instance.

        " + }, + "SpotInstanceType":{ + "shape":"SpotInstanceTypeEnum", + "documentation":"

        Defines the type of spot instance request.

        " + }, + "ValidUntilUtc":{ + "shape":"Timestamp", + "documentation":"

        Timestamp until which spot instance request is valid.

        " + } + }, + "documentation":"

        Defines configuration for spot instance deployment.

        " + }, + "String":{"type":"string"}, + "String128":{ + "type":"string", + "max":128, + "min":0 + }, + "String64":{ + "type":"string", + "max":64, + "min":0 + }, + "SubnetId":{ + "type":"string", + "pattern":"subnet-[0-9a-zA-Z]{1,63}" + }, + "Tag":{ + "type":"structure", + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

        Unique identifier for the tag.

        " + }, + "Value":{ + "shape":"TagValue", + "documentation":"

        Value associated with the tag key.

        " + } + }, + "documentation":"

        Represents a key-value metadata tag.

        " + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":30, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":30, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "WorkspaceInstanceId", + "Tags" + ], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the WorkSpace Instance to tag.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        Tags to be added to the WorkSpace Instance.

        " + } + }, + "documentation":"

        Specifies tags to add to a WorkSpace Instance.

        " + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms successful tag addition.

        " + }, + "TagSpecification":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceTypeEnum", + "documentation":"

        Type of resource being tagged.

        " + }, + "Tags":{ + "shape":"TagList", + "documentation":"

        Collection of tags for the specified resource.

        " + } + }, + "documentation":"

        Defines tagging configuration for a resource.

        " + }, + "TagSpecifications":{ + "type":"list", + "member":{"shape":"TagSpecification"}, + "max":30, + "min":0 + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + }, + "TenancyEnum":{ + "type":"string", + "enum":[ + "default", + "dedicated", + "host" + ] + }, + "ThrottlingException":{ + "type":"structure", + "required":["Message"], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Description of the throttling event.

        " + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

        Code identifying the service experiencing throttling.

        " + }, + "QuotaCode":{ + "shape":"String", + "documentation":"

        Specific code for the throttling quota.

        " + }, + "RetryAfterSeconds":{ + "shape":"Integer", + "documentation":"

        Recommended wait time before retrying the request.

        " + } + }, + "documentation":"

        Indicates the request rate has exceeded limits.

        ", + "exception":true, + "retryable":{"throttling":true} + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "WorkspaceInstanceId", + "TagKeys" + ], + "members":{ + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier of the WorkSpace Instance to untag.

        " + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

        Keys of tags to be removed.

        " + } + }, + "documentation":"

        Specifies tags to remove from a WorkSpace Instance.

        " + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + }, + "documentation":"

        Confirms successful tag removal.

        " + }, + "UserData":{ + "type":"string", + "max":16000, + "min":0, + "sensitive":true + }, + "ValidationException":{ + "type":"structure", + "required":[ + "Message", + "Reason" + ], + "members":{ + "Message":{ + "shape":"String", + "documentation":"

        Overall description of validation failures.

        " + }, + "Reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

        Specific reason for the validation failure.

        " + }, + "FieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

        List of fields that failed validation.

        " + } + }, + "documentation":"

        Indicates invalid input parameters in the request.

        ", + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "Name", + "Reason", + "Message" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

        Name of the field that failed validation.

        " + }, + "Reason":{ + "shape":"String", + "documentation":"

        Reason for the validation failure.

        " + }, + "Message":{ + "shape":"String", + "documentation":"

        Detailed error message describing the validation issue.

        " + } + }, + "documentation":"

        Represents a validation error field in an API request.

        " + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "UNKNOWN_OPERATION", + "UNSUPPORTED_OPERATION", + "CANNOT_PARSE", + "FIELD_VALIDATION_FAILED", + "DEPENDENCY_FAILURE", + "OTHER" + ] + }, + "VirtualName":{ + "type":"string", + "pattern":"ephemeral(0|[1-9][0-9]{0,2})" + }, + "VolumeId":{ + "type":"string", + "pattern":"vol-[0-9a-zA-Z]{1,63}" + }, + "VolumeTypeEnum":{ + "type":"string", + "enum":[ + "standard", + "io1", + "io2", + "gp2", + "sc1", + "st1", + "gp3" + ] + }, + "WorkspaceInstance":{ + "type":"structure", + "members":{ + "ProvisionState":{ + "shape":"ProvisionStateEnum", + "documentation":"

        Current provisioning state of the WorkSpace Instance.

        " + }, + "WorkspaceInstanceId":{ + "shape":"WorkspaceInstanceId", + "documentation":"

        Unique identifier for the WorkSpace Instance.

        " + }, + "EC2ManagedInstance":{ + "shape":"EC2ManagedInstance", + "documentation":"

        Details of the associated EC2 managed instance.

        " + } + }, + "documentation":"

        Represents a single WorkSpace Instance.

        " + }, + "WorkspaceInstanceError":{ + "type":"structure", + "members":{ + "ErrorCode":{ + "shape":"String", + "documentation":"

        Unique error code for the WorkSpace Instance error.

        " + }, + "ErrorMessage":{ + "shape":"String", + "documentation":"

        Detailed description of the WorkSpace Instance error.

        " + } + }, + "documentation":"

        Captures errors specific to WorkSpace Instance operations.

        " + }, + "WorkspaceInstanceErrors":{ + "type":"list", + "member":{"shape":"WorkspaceInstanceError"} + }, + "WorkspaceInstanceId":{ + "type":"string", + "max":70, + "min":15, + "pattern":"wsinst-[0-9a-zA-Z]{8,63}" + }, + "WorkspaceInstances":{ + "type":"list", + "member":{"shape":"WorkspaceInstance"} + } + }, + "documentation":"

        Amazon WorkSpaces Instances provides an API framework for managing virtual workspace environments across multiple AWS regions, enabling programmatic creation and configuration of desktop infrastructure.

        " +} diff --git a/services/workspacesinstances/src/main/resources/codegen-resources/waiters-2.json b/services/workspacesinstances/src/main/resources/codegen-resources/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/services/workspacesinstances/src/main/resources/codegen-resources/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/services/workspacesthinclient/pom.xml b/services/workspacesthinclient/pom.xml index 815f7995857f..716e22823fa3 100644 --- a/services/workspacesthinclient/pom.xml +++ b/services/workspacesthinclient/pom.xml @@ -17,7 +17,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT workspacesthinclient AWS Java SDK :: Services :: Work Spaces Thin Client diff --git a/services/workspacesthinclient/src/main/resources/codegen-resources/customization.config b/services/workspacesthinclient/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/workspacesthinclient/src/main/resources/codegen-resources/customization.config +++ b/services/workspacesthinclient/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json b/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json index 4769a2e652cd..e2d58f7a7f04 100644 --- a/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json +++ b/services/workspacesthinclient/src/main/resources/codegen-resources/service-2.json @@ -26,9 +26,9 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - 
{"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Creates an environment for your thin client devices.

        ", @@ -46,9 +46,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deletes a thin client device.

        ", @@ -67,9 +67,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deletes an environment.

        ", @@ -88,9 +88,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Deregisters a thin client device.

        ", @@ -239,9 +239,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Assigns one or more tags (key-value pairs) to the specified resource.

        ", @@ -259,9 +259,9 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], "documentation":"

        Removes a tag or tags from a resource.

        ", @@ -300,6 +300,7 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} @@ -634,13 +635,6 @@ "kmsKeyArn":{ "shape":"KmsKeyArn", "documentation":"

        The Amazon Resource Name (ARN) of the Key Management Service key used to encrypt the device.

        " - }, - "tags":{ - "shape":"TagsMap", - "documentation":"

        The tag keys and optional values for the resource.

        ", - "deprecated":true, - "deprecatedMessage":"This field will be removed in future releases. Use ListTagsForResource API instead.", - "deprecatedSince":"2025-03-25" } }, "documentation":"

        Describes a thin client device.

        " @@ -838,13 +832,6 @@ "shape":"KmsKeyArn", "documentation":"

        The Amazon Resource Name (ARN) of the Key Management Service key used to encrypt the environment.

        " }, - "tags":{ - "shape":"TagsMap", - "documentation":"

        The tag keys and optional values for the resource.

        ", - "deprecated":true, - "deprecatedMessage":"This field will be removed in future releases. Use ListTagsForResource API instead.", - "deprecatedSince":"2025-03-25" - }, "deviceCreationTags":{ "shape":"DeviceCreationTagsMap", "documentation":"

        The tag keys and optional values for the newly created devices for this environment.

        " @@ -1309,13 +1296,6 @@ "arn":{ "shape":"Arn", "documentation":"

        The Amazon Resource Name (ARN) of the software set.

        " - }, - "tags":{ - "shape":"TagsMap", - "documentation":"

        The tag keys and optional values for the resource.

        ", - "deprecated":true, - "deprecatedMessage":"This field will be removed in future releases. Use ListTagsForResource API instead.", - "deprecatedSince":"2025-03-25" } }, "documentation":"

        Describes a software set.

        " diff --git a/services/workspacesweb/pom.xml b/services/workspacesweb/pom.xml index ea81444bc575..849d73a83fa8 100644 --- a/services/workspacesweb/pom.xml +++ b/services/workspacesweb/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT workspacesweb AWS Java SDK :: Services :: Work Spaces Web diff --git a/services/workspacesweb/src/main/resources/codegen-resources/customization.config b/services/workspacesweb/src/main/resources/codegen-resources/customization.config index 6bc46bc3c310..e824e95e8fbd 100644 --- a/services/workspacesweb/src/main/resources/codegen-resources/customization.config +++ b/services/workspacesweb/src/main/resources/codegen-resources/customization.config @@ -1,4 +1,3 @@ { - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/services/xray/pom.xml b/services/xray/pom.xml index 89b70043ebaf..aff5a4155af8 100644 --- a/services/xray/pom.xml +++ b/services/xray/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk services - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT xray AWS Java SDK :: Services :: AWS X-Ray diff --git a/services/xray/src/main/resources/codegen-resources/customization.config b/services/xray/src/main/resources/codegen-resources/customization.config index df5fa552723f..e30f4faf438f 100644 --- a/services/xray/src/main/resources/codegen-resources/customization.config +++ b/services/xray/src/main/resources/codegen-resources/customization.config @@ -14,6 +14,5 @@ "union": true } }, - "enableGenerateCompiledEndpointRules": true, - "enableFastUnmarshaller": true + "enableGenerateCompiledEndpointRules": true } diff --git a/test/architecture-tests/pom.xml b/test/architecture-tests/pom.xml index 03290e317b4d..324c96e478b0 100644 --- a/test/architecture-tests/pom.xml +++ b/test/architecture-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml diff 
--git a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java index d3b3af42cb20..acae7b3b4a0e 100644 --- a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java +++ b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/ArchUtils.java @@ -26,6 +26,11 @@ public static Pattern classNameToPattern(Class clazz) { return Pattern.compile(".*/" + clazz.getCanonicalName().replace('.', '/') + ".class"); } + public static Pattern classWithInnerClassesToPattern(Class clazz) { + // inner or inline/anonymous classes have $ followed by a name or number eg "$Inner" or "$1" + return Pattern.compile(".*/" + clazz.getCanonicalName().replace('.', '/') + "(\\$.*)?.class"); + } + public static Pattern classNameToPattern(String className) { return Pattern.compile(".*/" + className.replace('.', '/') + ".class"); } diff --git a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java index fd921257f5f5..b54955cc56f2 100644 --- a/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java +++ b/test/architecture-tests/src/test/java/software/amazon/awssdk/archtests/CodingConventionWithSuppressionTest.java @@ -33,6 +33,7 @@ import java.util.regex.Pattern; import org.junit.jupiter.api.Test; import software.amazon.awssdk.core.internal.http.pipeline.stages.MakeHttpRequestStage; +import software.amazon.awssdk.core.sync.ResponseTransformer; import software.amazon.awssdk.metrics.publishers.emf.EmfMetricLoggingPublisher; import software.amazon.awssdk.metrics.publishers.emf.internal.MetricEmfConverter; import software.amazon.awssdk.utils.Logger; @@ -52,7 +53,9 @@ public class CodingConventionWithSuppressionTest { 
ArchUtils.classNameToPattern("software.amazon.awssdk.services.s3.internal.crt.S3CrtResponseHandlerAdapter"))); private static final Set ALLOWED_ERROR_LOG_SUPPRESSION = new HashSet<>( - Arrays.asList(ArchUtils.classNameToPattern(EmfMetricLoggingPublisher.class))); + Arrays.asList( + ArchUtils.classNameToPattern(EmfMetricLoggingPublisher.class), + ArchUtils.classWithInnerClassesToPattern(ResponseTransformer.class))); @Test void shouldNotAbuseWarnLog() { diff --git a/test/auth-tests/pom.xml b/test/auth-tests/pom.xml index f4ee0b957fd3..cecc299addf0 100644 --- a/test/auth-tests/pom.xml +++ b/test/auth-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/bundle-logging-bridge-binding-test/pom.xml b/test/bundle-logging-bridge-binding-test/pom.xml index 15f951b75162..852559b3336c 100644 --- a/test/bundle-logging-bridge-binding-test/pom.xml +++ b/test/bundle-logging-bridge-binding-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/bundle-shading-tests/pom.xml b/test/bundle-shading-tests/pom.xml index 0d60933b1cfc..bf499fbc8355 100644 --- a/test/bundle-shading-tests/pom.xml +++ b/test/bundle-shading-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/codegen-generated-classes-test/pom.xml b/test/codegen-generated-classes-test/pom.xml index 3300463a58f6..e76abff3d705 100644 --- a/test/codegen-generated-classes-test/pom.xml +++ b/test/codegen-generated-classes-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config 
b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config new file mode 100644 index 000000000000..86839537eeab --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/customization.config @@ -0,0 +1,4 @@ +{ + "skipEndpointTestGeneration": true, + "enableEnvironmentBearerToken": true +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json new file mode 100644 index 000000000000..cc38f1ffb165 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-rule-set.json @@ -0,0 +1,355 @@ +{ + "version": "1.3", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": true, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + 
}, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://environment-token.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + 
"signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://environment-token.{Region}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{Region}", + "signingName": "environment-token" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json new file mode 100644 index 000000000000..f94902ff9d99 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/endpoint-tests.json @@ -0,0 +1,5 @@ +{ + "testCases": [ + ], + "version": "1.0" +} \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json new file mode 100644 index 000000000000..c70811a87f80 --- /dev/null +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/environmenttokenprovider/service-2.json @@ -0,0 +1,38 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2016-03-11", + "endpointPrefix":"environment-token", + "auth":["aws.auth#sigv4", "smithy.api#httpBearerAuth"], + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"EnvironmentTokenProviderService", + "serviceFullName":"Environment Token Provider Service", + "serviceId":"EnvironmentTokenProviderService", + "signatureVersion":"v4", + 
"targetPrefix":"EnvironmentTokenProviderService", + "timestampFormat":"unixTimestamp", + "uid":"restjson-2016-03-11" + }, + "operations":{ + "OneOperation":{ + "name":"OneOperation", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/oneoperation" + }, + "input":{"shape":"OneShape"} + } + }, + "shapes": { + "OneShape": { + "type": "structure", + "members": { + "StringMember": { + "shape": "String" + } + } + }, + "String":{"type":"string"} + } +} diff --git a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json index b5047e2734fc..6fd1f03e1fd1 100644 --- a/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json +++ b/test/codegen-generated-classes-test/src/main/resources/codegen-resources/multiauth/service-2.json @@ -13,7 +13,7 @@ "timestampFormat":"unixTimestamp", "uid":"restjson-2016-03-11" }, - "operations":{ + "operations": { "multiAuthWithOnlySigv4a":{ "name":"multiAuthWithOnlySigv4a", "http":{ @@ -26,6 +26,18 @@ "ApiType":{"value":"NoEndpointSigningProperties"} } }, + "multiAuthWithOnlySigv4":{ + "name":"multiAuthWithOnlySigv4", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/multiAuthWithOnlySigv4" + }, + "input":{"shape":"SampleRequest"}, + "auth": ["aws.auth#sigv4"], + "staticContextParams":{ + "ApiType":{"value":"NoEndpointSigningProperties"} + } + }, "multiAuthWithOnlySigv4aAndSigv4":{ "name":"multiAuthWithOnlySigv4aAndSigv4", "http":{ @@ -72,6 +84,17 @@ "value": "onlySigv4a" } } + }, + "multiAuthWithoutAuthScheme":{ + "name":"multiAuthWithoutAuthScheme", + "http":{ + "method":"POST", + "requestUri":"/2016-03-11/multiAuthWithoutAuthScheme" + }, + "input":{"shape":"SampleRequest"}, + "staticContextParams":{ + "ApiType":{"value":"NoEndpointSigningProperties"} + } } }, "shapes": { diff --git 
a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BusinessMetricsUserAgentTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BusinessMetricsUserAgentTest.java index ecfe48c5972b..ba6dff526c13 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BusinessMetricsUserAgentTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/BusinessMetricsUserAgentTest.java @@ -47,6 +47,7 @@ import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClient; import software.amazon.awssdk.services.protocolrestjson.ProtocolRestJsonAsyncClientBuilder; +import software.amazon.awssdk.services.protocolrestjson.internal.ServiceVersionInfo; import software.amazon.awssdk.services.protocolrestjson.model.PaginatedOperationWithResultKeyResponse; import software.amazon.awssdk.services.protocolrestjson.paginators.PaginatedOperationWithResultKeyPublisher; import software.amazon.awssdk.services.restjsonendpointproviders.RestJsonEndpointProvidersAsyncClient; @@ -181,4 +182,20 @@ public ExecutionAttributes executionAttributes() { return executionAttributes; } } + + @Test + void validate_serviceUserAgent_format() { + ProtocolRestJsonAsyncClientBuilder clientBuilder = asyncClientBuilderForProtocolRestJson(); + + ProtocolRestJsonAsyncClient client = clientBuilder + .region(Region.US_WEST_2) + .credentialsProvider(CREDENTIALS_PROVIDER) + .overrideConfiguration(c -> c.addExecutionInterceptor(interceptor)) + .build(); + + client.headOperation(); + + String userAgent = assertAndGetUserAgentString(); + assertThat(userAgent).contains("AmazonProtocolRestJson#" + ServiceVersionInfo.VERSION); + } } \ No newline at end of file diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java new file mode 100644 index 000000000000..1868848bdf3c --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/PreferredAuthSchemeProviderTest.java @@ -0,0 +1,121 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Stream; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.http.auth.spi.scheme.AuthSchemeOption; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeParams; +import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeProvider; + +public class PreferredAuthSchemeProviderTest { + + private static final String OPERATION_SIGV4_ONLY = "multiAuthWithOnlySigv4"; + private static final String OPERATION_SIGV4A_ONLY = "multiAuthWithOnlySigv4a"; + private static final String OPERATION_SIGV4A_AND_SIGV4 = "multiAuthWithOnlySigv4aAndSigv4"; + private static final String OPERATION_NOAUTH = "multiAuthNoAuth"; + + private static final String SIGV4 = 
"sigv4"; + private static final String PREFIXED_SIGV4 = "aws.auth#sigv4"; + private static final String PREFIXED_SIGV4A = "aws.auth#sigv4a"; + private static final String SIGV4A = "sigv4a"; + private static final String BEARER = "bearer"; + private static final String ANONYMOUS = "noauth"; + + @ParameterizedTest(name = "{3}") + @MethodSource("authSchemeTestCases") + void testAuthSchemePreference(List preferredAuthSchemes, String operation, String expectedFirstScheme, String testName) { + MultiauthAuthSchemeProvider provider = MultiauthAuthSchemeProvider.defaultProvider(preferredAuthSchemes); + + MultiauthAuthSchemeParams params = MultiauthAuthSchemeParams + .builder() + .region(Region.US_WEST_2) + .operation(operation) + .build(); + + List authSchemes = provider.resolveAuthScheme(params); + + Assertions.assertFalse(authSchemes.isEmpty()); + Assertions.assertEquals(expectedFirstScheme, authSchemes.get(0).schemeId()); + } + + static Stream authSchemeTestCases() { + return Stream.of( + Arguments.of( + Arrays.asList(BEARER, ANONYMOUS), + OPERATION_SIGV4A_AND_SIGV4, + PREFIXED_SIGV4A, + "Unsupported auth schemes only" + ), + + Arguments.of( + Arrays.asList(SIGV4, SIGV4A), + OPERATION_NOAUTH, + PREFIXED_SIGV4, + "Operation with no auth scheme should default to Sigv4" + ), + + Arguments.of( + Arrays.asList(BEARER, SIGV4, ANONYMOUS), + OPERATION_SIGV4A_AND_SIGV4, + PREFIXED_SIGV4, + "Mix of supported and unsupported schemes" + ), + + Arguments.of( + Arrays.asList(SIGV4, SIGV4A), + OPERATION_SIGV4A_AND_SIGV4, + PREFIXED_SIGV4, + "All supported schemes in reverse order" + ), + + Arguments.of( + Arrays.asList(SIGV4A), + OPERATION_SIGV4_ONLY, + PREFIXED_SIGV4, + "Operation with only sigv4 supported scheme" + ), + + Arguments.of( + Arrays.asList(SIGV4, SIGV4A), + OPERATION_SIGV4A_ONLY, + PREFIXED_SIGV4A, + "Operation with only sigv4a supported scheme" + ), + + Arguments.of( + Collections.emptyList(), + OPERATION_SIGV4A_AND_SIGV4, + PREFIXED_SIGV4A, + "Empty preference list" + 
), + + Arguments.of( + Arrays.asList(SIGV4A, SIGV4, BEARER), + OPERATION_SIGV4A_AND_SIGV4, + PREFIXED_SIGV4A, + "First preference is supported" + ) + ); + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java index e9ff4da41fa1..371d089dea2b 100644 --- a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/bearerauth/ClientBuilderTest.java @@ -57,6 +57,9 @@ public void syncClient_customTokenIdentityProviderSet_presentInFinalConfig() { assertThat(config.option(AwsClientOption.TOKEN_IDENTITY_PROVIDER)) .isSameAs(mockProvider); + + assertThat(builder.buildClient().serviceClientConfiguration().tokenProvider()) + .isSameAs(mockProvider); } @Test diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java new file mode 100644 index 000000000000..c16069a30dd5 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/environmenttokenprovider/EnvironmentTokenProviderTest.java @@ -0,0 +1,304 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.environmenttokenprovider; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.token.credentials.StaticTokenProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.useragent.BusinessMetricFeatureId; +import software.amazon.awssdk.http.HttpExecuteResponse; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpResponse; +import software.amazon.awssdk.services.environmenttokenprovider.auth.scheme.EnvironmentTokenProviderAuthSchemeProvider; +import software.amazon.awssdk.services.environmenttokenprovider.model.OneOperationRequest; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.testutils.service.http.MockAsyncHttpClient; +import software.amazon.awssdk.testutils.service.http.MockSyncHttpClient; + +public class EnvironmentTokenProviderTest { + private static final String ENV_NAME = "AWS_BEARER_TOKEN_ENVIRONMENT_TOKEN"; + private static final String SYSTEM_PROPERTY_NAME = "aws.bearerTokenEnvironmentToken"; + public static final String ENV_TOKEN = "env-test-token"; + public static final String SYSTEM_TEST_TOKEN = "system-test-token"; + + private MockSyncHttpClient mockHttpClient; + private MockAsyncHttpClient mockAsyncHttpClient; + private String systemPropertyBeforeTest; + + private final EnvironmentVariableHelper environmentVariableHelper = new 
EnvironmentVariableHelper(); + + @BeforeEach + void setUp() { + mockHttpClient = new MockSyncHttpClient(); + mockAsyncHttpClient = new MockAsyncHttpClient(); + systemPropertyBeforeTest = System.getProperty(SYSTEM_PROPERTY_NAME); + } + + @AfterEach + void tearDown() { + mockHttpClient.reset(); + mockAsyncHttpClient.reset(); + environmentVariableHelper.reset(); + if (systemPropertyBeforeTest != null) { + System.setProperty(SYSTEM_PROPERTY_NAME, systemPropertyBeforeTest); + } else { + System.clearProperty(SYSTEM_PROPERTY_NAME); + } + } + + @ParameterizedTest + @MethodSource("testCases") + void testAsyncClient(TestCase testCase) { + setupSystemAndEnv(testCase); + + mockAsyncHttpClient.stubNextResponse(mockResponse()); + + EnvironmentTokenProviderAsyncClientBuilder clientBuilder = EnvironmentTokenProviderAsyncClient + .builder() + .httpClient(mockAsyncHttpClient); + + if (testCase.authSchemeProvider != null) { + clientBuilder.authSchemeProvider(testCase.authSchemeProvider); + } + + EnvironmentTokenProviderAsyncClient client = clientBuilder.build(); + + if (testCase.operationToken == null) { + client.oneOperation(b -> {} ).join(); + } else { + client.oneOperation(requestWithOperationToken(testCase)).join(); + } + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockAsyncHttpClient.getLastRequest(); + + verifyRequest(testCase, loggedRequest); + } + + @ParameterizedTest + @MethodSource("testCases") + void testSyncClient(TestCase testCase) { + setupSystemAndEnv(testCase); + + mockHttpClient.stubNextResponse(mockResponse()); + + EnvironmentTokenProviderClientBuilder clientBuilder = EnvironmentTokenProviderClient + .builder() + .httpClient(mockHttpClient); + + if (testCase.authSchemeProvider != null) { + clientBuilder.authSchemeProvider(testCase.authSchemeProvider); + } + + EnvironmentTokenProviderClient client = clientBuilder.build(); + + if (testCase.operationToken == null) { + client.oneOperation(b -> {} ); + } else { + 
client.oneOperation(requestWithOperationToken(testCase)); + } + + + SdkHttpFullRequest loggedRequest = (SdkHttpFullRequest) mockHttpClient.getLastRequest(); + + verifyRequest(testCase, loggedRequest); + } + + private static void verifyRequest(TestCase testCase, SdkHttpFullRequest loggedRequest) { + if (testCase.expectBearerAuth) { + assertThat(loggedRequest.firstMatchingHeader("Authorization").get()) + .startsWith("Bearer"); + } else { + assertThat(loggedRequest.firstMatchingHeader("Authorization") + .get()).startsWith("AWS4-HMAC-SHA256"); + } + + if (testCase.expectBusinessMetricSet) { + assertThat(loggedRequest.firstMatchingHeader("User-Agent").get()) + .matches(".*m\\/[A-Za-z0-9,]+" + BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS); + } else { + assertThat(loggedRequest.firstMatchingHeader("User-Agent").get()) + .doesNotMatch(".*m\\/[A-Za-z0-9,]+" + BusinessMetricFeatureId.BEARER_SERVICE_ENV_VARS); + } + } + + static Stream testCases() { + return Stream.of( + TestCase.builder() + .description("Does not use bearer auth when ENV token is unset") + .expectBearerAuth(false) + .build(), + + TestCase.builder() + .description("Uses bearer auth when ENV token is set") + .envVar(ENV_NAME, ENV_TOKEN) + .expectBearerAuth(true) + .expectedBearerToken(ENV_TOKEN) + .expectBusinessMetricSet(true) + .build(), + + TestCase.builder() + .description("Uses bearer auth when system property token is set") + .envVar(ENV_NAME, "some-other-token") + .systemProperty(SYSTEM_TEST_TOKEN) + .expectBearerAuth(true) + .expectedBearerToken(SYSTEM_TEST_TOKEN) + .expectBusinessMetricSet(true) + .build(), + + TestCase.builder() + .description("Uses bearer auth from environment over auth scheme preference") + .envVar(ENV_NAME, ENV_TOKEN) + .envVar( + SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.environmentVariable(), + "sigv4") + .expectBearerAuth(true) + .expectedBearerToken(ENV_TOKEN) + .expectBusinessMetricSet(true) + .build(), + + TestCase.builder() + .description("Doesn't use bearer when 
AuthSchemeProvider is manually configured on the client") + .envVar(ENV_NAME, ENV_TOKEN) + .authSchemeProvider(EnvironmentTokenProviderAuthSchemeProvider.defaultProvider()) + .expectBearerAuth(false) + .expectBusinessMetricSet(false) + .build(), + + TestCase.builder() + .description("Business metric is not set when the token is overridden on the operation") + .envVar(ENV_NAME, ENV_TOKEN) + .operationToken("operation-token") + .expectBearerAuth(true) + .expectedBearerToken("operation-token") + .expectBusinessMetricSet(false) + .build() + ); + } + + private static OneOperationRequest requestWithOperationToken(TestCase testCase) { + return OneOperationRequest.builder() + .overrideConfiguration(c -> c.tokenIdentityProvider( + StaticTokenProvider.create(() -> testCase.operationToken))) + .build(); + } + + private void setupSystemAndEnv(TestCase testCase) { + testCase.envVars.forEach(environmentVariableHelper::set); + if (testCase.systemProperty != null) { + System.setProperty(SYSTEM_PROPERTY_NAME, testCase.systemProperty); + } + } + + private HttpExecuteResponse mockResponse() { + return HttpExecuteResponse.builder() + .response(SdkHttpResponse.builder().statusCode(200).build()) + .build(); + } + + static final class TestCase { + final String description; + final Map envVars; + final String systemProperty; + final EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider; + final String operationToken; + final boolean expectBearerAuth; + final String expectedBearerToken; + final boolean expectBusinessMetricSet; + + private TestCase(Builder builder) { + this.description = builder.description; + this.envVars = builder.envVars; + this.systemProperty = builder.systemProperty; + this.authSchemeProvider = builder.authSchemeProvider; + this.operationToken = builder.operationToken; + this.expectBearerAuth = builder.expectBearerAuth; + this.expectedBearerToken = builder.expectedBearerToken; + this.expectBusinessMetricSet = builder.expectBusinessMetricSet; + } + + @Override + 
public String toString() { + return description; + } + + static Builder builder() { + return new Builder(); + } + + static class Builder { + private String description; + private Map envVars = new HashMap<>(); + private String systemProperty; + private EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider; + private String operationToken; + private boolean expectBearerAuth; + private String expectedBearerToken; + private boolean expectBusinessMetricSet; + + public Builder description(String description) { + this.description = description; + return this; + } + + public Builder envVar(String key, String value) { + this.envVars.put(key, value); + return this; + } + + public Builder systemProperty(String systemProperty) { + this.systemProperty = systemProperty; + return this; + } + + public Builder authSchemeProvider(EnvironmentTokenProviderAuthSchemeProvider authSchemeProvider) { + this.authSchemeProvider = authSchemeProvider; + return this; + } + + public Builder operationToken(String operationToken) { + this.operationToken = operationToken; + return this; + } + + public Builder expectBearerAuth(boolean expectBearerAuth) { + this.expectBearerAuth = expectBearerAuth; + return this; + } + + public Builder expectedBearerToken(String expectedBearerToken) { + this.expectedBearerToken = expectedBearerToken; + return this; + } + + public Builder expectBusinessMetricSet(boolean expectBusinessMetricSet) { + this.expectBusinessMetricSet = expectBusinessMetricSet; + return this; + } + + public TestCase build() { + return new TestCase(this); + } + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java new file mode 100644 index 000000000000..502f5aa20be9 --- /dev/null +++ 
b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/multiauth/AuthSchemePreferenceResolverFunctionalTest.java @@ -0,0 +1,258 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.multiauth; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; +import software.amazon.awssdk.core.SelectedAuthScheme; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute; +import software.amazon.awssdk.http.auth.spi.scheme.AuthScheme; +import software.amazon.awssdk.http.auth.spi.signer.AsyncSignRequest; +import software.amazon.awssdk.http.auth.spi.signer.AsyncSignedRequest; +import 
software.amazon.awssdk.http.auth.spi.signer.HttpSigner; +import software.amazon.awssdk.http.auth.spi.signer.SignRequest; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; +import software.amazon.awssdk.identity.spi.IdentityProvider; +import software.amazon.awssdk.identity.spi.IdentityProviders; +import software.amazon.awssdk.profiles.ProfileFile; +import software.amazon.awssdk.profiles.ProfileProperty; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.multiauth.auth.scheme.MultiauthAuthSchemeProvider; +import software.amazon.awssdk.services.multiauth.model.MultiAuthWithOnlySigv4AAndSigv4Request; +import software.amazon.awssdk.testutils.EnvironmentVariableHelper; +import software.amazon.awssdk.utils.StringInputStream; + +public class AuthSchemePreferenceResolverFunctionalTest { + private final EnvironmentVariableHelper helper = new EnvironmentVariableHelper(); + + @AfterEach + void tearDown() { + System.clearProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property()); + helper.reset(); + } + + @ParameterizedTest + @MethodSource("testCases") + void resolvesAuthSchemePreference(TestCase testCase) { + try { + MultiauthClientBuilder builder = + MultiauthClient.builder() + .region(Region.US_WEST_2) + .credentialsProvider(AnonymousCredentialsProvider.create()); + + builder.putAuthScheme(authScheme("aws.auth#sigv4a", new SkipCrtNoOpSigner())); + + if (testCase.clientSetting != null) { + builder.authSchemeProvider(MultiauthAuthSchemeProvider.defaultProvider(testCase.clientSetting)); + } + + if (testCase.systemPropSetting != null) { + System.setProperty(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.property(), testCase.systemPropSetting); + } + + if (testCase.envVarSetting != null) { + helper.set(SdkSystemSetting.AWS_AUTH_SCHEME_PREFERENCE.environmentVariable(), testCase.envVarSetting); + } + + ProfileFile.Builder profileFile = 
ProfileFile.builder().type(ProfileFile.Type.CONFIGURATION); + + if (testCase.profileSetting != null) { + profileFile.content(new StringInputStream("[default]\n" + + ProfileProperty.AUTH_SCHEME_PREFERENCE + " = " + testCase.profileSetting)); + } else { + profileFile.content(new StringInputStream("")); + } + + AutSchemeCapturingInterceptor interceptor = new AutSchemeCapturingInterceptor(); + + builder.overrideConfiguration(c -> c.defaultProfileFile(profileFile.build()) + .defaultProfileName("default") + .addExecutionInterceptor(interceptor)); + + MultiauthClient client = builder.build(); + + assertThatThrownBy(() -> + client.multiAuthWithOnlySigv4aAndSigv4(MultiAuthWithOnlySigv4AAndSigv4Request.builder().build()) + ).isInstanceOf(AutSchemeCapturingInterceptor.CaptureException.class); + + assertThat(interceptor.authScheme()).isEqualTo(testCase.resolvedAuthScheme); + } finally { + tearDown(); + } + } + + private static AuthScheme authScheme(String schemeId, HttpSigner signer) { + return new AuthScheme() { + @Override + public String schemeId() { + return schemeId; + } + + @Override + public IdentityProvider identityProvider(IdentityProviders providers) { + return providers.identityProvider(AwsCredentialsIdentity.class); + } + + @Override + public HttpSigner signer() { + return signer; + } + }; + } + + static Stream testCases() { + return Stream.of( + Arguments.of(new TestCase( + null, + null, + null, + Arrays.asList("sigv4", "noauth"), + "sigv4", + "Client config is used when set")), + + Arguments.of(new TestCase( + null, + null, + "sigv4,sigv4a,bearer", + null, + "sigv4", + "System property value is used")), + + Arguments.of(new TestCase( + null, + "sigv4a,sigv4,bearer", + null, + null, + "sigv4a", + "Environment variable is used when other properties is null")), + + Arguments.of(new TestCase( + "bearer,sigv4,sigv4a", + null, + null, + null, + "sigv4", + "Profile setting is used when others are null")), + + Arguments.of(new TestCase( + "", + null, + null, + null, + 
"sigv4a", + "Profile setting is used when explicit empty string is supplied")), + + + Arguments.of(new TestCase( + "bearer,sigv4,sigv4a", + "sigv4a,sigv4,bearer", + "sigv4,sigv4a,bearer", + null, + "sigv4", + "JVM system property has precedence over env var and profile")), + + Arguments.of(new TestCase( + "bearer,sigv4,sigv4a", + "sigv4,sigv4a,bearer", + "sigv4,sigv4a,bearer", + Arrays.asList("sigv4a", "noauth", "bearer"), + "sigv4a", + "Client config has highest precedence")) + ); + } + + public static class TestCase { + private final String profileSetting; + private final String envVarSetting; + private final String systemPropSetting; + private final List clientSetting; + private final String resolvedAuthScheme; + private final String caseName; + + public TestCase(String profileSetting, String envVarSetting, String systemPropSetting, List clientSetting, + String resolvedAuthScheme, String caseName) { + this.profileSetting = profileSetting; + this.envVarSetting = envVarSetting; + this.systemPropSetting = systemPropSetting; + this.clientSetting = clientSetting; + this.resolvedAuthScheme = resolvedAuthScheme; + + this.caseName = caseName; + } + + @Override + public String toString() { + return caseName; + } + } + + public static class AutSchemeCapturingInterceptor implements ExecutionInterceptor { + private final AtomicReference authScheme = new AtomicReference<>(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + SelectedAuthScheme scheme = executionAttributes.getAttribute(SdkInternalExecutionAttribute.SELECTED_AUTH_SCHEME); + String schemeId = scheme.authSchemeOption().schemeId(); + authScheme.set(schemeId.replace("aws.auth#", "")); + throw new CaptureException(); + } + + + public String authScheme() { + return this.authScheme.get(); + } + + public static class CaptureException extends RuntimeException { + } + } + + public static class SkipCrtNoOpSigner implements HttpSigner { + + 
@Override + public SignedRequest sign(SignRequest request) { + return SignedRequest + .builder() + .request(request.request()) + .build(); + } + + @Override + public CompletableFuture signAsync( + AsyncSignRequest request) { + return CompletableFuture.completedFuture( + AsyncSignedRequest.builder() + .request(request.request()) + .build() + ); + } + } +} diff --git a/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java new file mode 100644 index 000000000000..6f93bff6a7d9 --- /dev/null +++ b/test/codegen-generated-classes-test/src/test/java/software/amazon/awssdk/services/useragent/StreamingBodyAndTransformerImplTrackingTest.java @@ -0,0 +1,187 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.useragent; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.Executors; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.ProtocolRestJsonWithConfigAsyncClient; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.ProtocolRestJsonWithConfigClient; +import software.amazon.awssdk.services.protocolrestjsonwithconfig.model.StreamingOutputOperationResponse; +import software.amazon.awssdk.testutils.RandomTempFile; + +public class StreamingBodyAndTransformerImplTrackingTest { + + private CapturingInterceptor interceptor; + + @BeforeEach + public void setup() { + this.interceptor = new CapturingInterceptor(); + } + + @Test + public void streamingInputOperation_syncClient_stringBody_recordsMetadata() { + callStreamingInputOperation(syncClient(), RequestBody.fromString("body")); + assertThat(interceptor.userAgent()).contains("md/rb#b"); + } + + @Test + public void streamingInputOperation_syncClient_fileBody_recordsMetadata() throws IOException { + callStreamingInputOperation(syncClient(), RequestBody.fromFile(new RandomTempFile(64))); + assertThat(interceptor.userAgent()).contains("md/rb#f"); + } + + @Test + public void streamingInputOperation_syncClient_streamBody_recordsMetadata() 
throws IOException { + callStreamingInputOperation( + syncClient(), + RequestBody.fromInputStream(new ByteArrayInputStream(new byte[64]), 64)); + assertThat(interceptor.userAgent()).contains("md/rb#s"); + } + + @Test + public void streamingInputOperation_asyncClient_stringBody_recordsMetadata() { + callStreamingInputOperation(asyncClient(), AsyncRequestBody.fromString("body")); + assertThat(interceptor.userAgent()).contains("md/rb#b"); + } + + @Test + public void streamingInputOperation_asyncClient_fileBody_recordsMetadata() throws IOException { + callStreamingInputOperation(asyncClient(), AsyncRequestBody.fromFile(new RandomTempFile(64))); + assertThat(interceptor.userAgent()).contains("md/rb#f"); + } + + @Test + public void streamingInputOperation_asyncClient_streamBody_recordsMetadata() throws IOException { + callStreamingInputOperation( + asyncClient(), + AsyncRequestBody.fromInputStream(new ByteArrayInputStream(new byte[64]), 64L, Executors.newSingleThreadExecutor()) + ); + assertThat(interceptor.userAgent()).contains("md/rb#s"); + } + + @Test + public void streamingOutputOperation_syncClient_bytes_recordsMetadata() { + callStreamingOutputOperation(syncClient(), ResponseTransformer.toBytes()); + assertThat(interceptor.userAgent()).contains("md/rt#b"); + } + + @Test + public void streamingOutputOperation_syncClient_file_recordsMetadata() throws IOException { + callStreamingOutputOperation(syncClient(), ResponseTransformer.toFile(new RandomTempFile(0))); + assertThat(interceptor.userAgent()).contains("md/rt#f"); + } + + @Test + public void streamingOutputOperation_syncClient_stream_recordsMetadata() { + callStreamingOutputOperation(syncClient(), ResponseTransformer.toOutputStream(new OutputStream() { + @Override + public void write(int b) { + // no-op + } + })); + assertThat(interceptor.userAgent()).contains("md/rt#s"); + } + + @Test + public void streamingOutputOperation_asyncClient_bytes_recordsMetadata() { + callStreamingOutputOperation(asyncClient(), 
AsyncResponseTransformer.toBytes()); + assertThat(interceptor.userAgent()).contains("md/rt#b"); + } + + @Test + public void streamingOutputOperation_asyncClient_file_recordsMetadata() throws IOException { + callStreamingOutputOperation(asyncClient(), AsyncResponseTransformer.toFile(new RandomTempFile(0))); + assertThat(interceptor.userAgent()).contains("md/rt#f"); + } + + @Test + public void streamingOutputOperation_asyncClient_publisher_recordsMetadata() { + callStreamingOutputOperation(asyncClient(), AsyncResponseTransformer.toPublisher()); + assertThat(interceptor.userAgent()).contains("md/rt#p"); + } + + + + private ProtocolRestJsonWithConfigClient syncClient() { + return ProtocolRestJsonWithConfigClient + .builder() + .overrideConfiguration(c -> c.addExecutionInterceptor(interceptor)) + .build(); + } + + private static void callStreamingInputOperation(ProtocolRestJsonWithConfigClient client, RequestBody requestBody) { + assertThatThrownBy(() -> client.streamingInputOperation(r -> {}, requestBody)) + .hasMessageContaining("stop"); + } + + private void callStreamingOutputOperation( + ProtocolRestJsonWithConfigClient client, ResponseTransformer transformer) { + assertThatThrownBy(() -> client.streamingOutputOperation(r -> {}, transformer)) + .hasMessageContaining("stop"); + } + + private ProtocolRestJsonWithConfigAsyncClient asyncClient() { + return ProtocolRestJsonWithConfigAsyncClient + .builder() + .overrideConfiguration(c -> c.addExecutionInterceptor(interceptor)) + .build(); + } + + private static void callStreamingInputOperation(ProtocolRestJsonWithConfigAsyncClient client, AsyncRequestBody requestBody) { + assertThatThrownBy(() -> { + client.streamingInputOperation( + r -> { + r.overrideConfiguration( + c -> c.putHeader("x-amz-content-sha256", "value")); + }, + requestBody).join(); + }).hasMessageContaining("stop"); + } + + private void callStreamingOutputOperation( + ProtocolRestJsonWithConfigAsyncClient client, AsyncResponseTransformer transformer) { + 
assertThatThrownBy(() -> client.streamingOutputOperation(r -> {}, transformer).join()) + .hasMessageContaining("stop"); + } + + public static class CapturingInterceptor implements ExecutionInterceptor { + private Context.BeforeTransmission context; + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + this.context = context; + throw new RuntimeException("stop"); + } + + public String userAgent() { + return context.httpRequest().headers().get("User-Agent").get(0); + } + } +} diff --git a/test/crt-unavailable-tests/pom.xml b/test/crt-unavailable-tests/pom.xml index a91e3edc80d4..02486203ac3d 100644 --- a/test/crt-unavailable-tests/pom.xml +++ b/test/crt-unavailable-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/http-client-tests/pom.xml b/test/http-client-tests/pom.xml index 29a2978c8a77..faa2166c0115 100644 --- a/test/http-client-tests/pom.xml +++ b/test/http-client-tests/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml http-client-tests diff --git a/test/module-path-tests/pom.xml b/test/module-path-tests/pom.xml index a612f289233c..7f68476dbebb 100644 --- a/test/module-path-tests/pom.xml +++ b/test/module-path-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/old-client-version-compatibility-test/pom.xml b/test/old-client-version-compatibility-test/pom.xml index a5cab6196b2f..6f13a0ed4e13 100644 --- a/test/old-client-version-compatibility-test/pom.xml +++ b/test/old-client-version-compatibility-test/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml diff --git a/test/protocol-tests-core/pom.xml b/test/protocol-tests-core/pom.xml index 22860ead795e..36dbf9e739ff 
100644 --- a/test/protocol-tests-core/pom.xml +++ b/test/protocol-tests-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshalledErrorAssertion.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshalledErrorAssertion.java new file mode 100644 index 000000000000..763e774a930f --- /dev/null +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshalledErrorAssertion.java @@ -0,0 +1,61 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.protocol.asserts.unmarshalling; + + +import static org.junit.Assert.fail; +import static org.unitils.reflectionassert.ReflectionAssert.assertReflectionEquals; + +import com.fasterxml.jackson.databind.JsonNode; +import java.lang.reflect.Field; +import org.junit.Assert; +import software.amazon.awssdk.core.exception.SdkServiceException; +import software.amazon.awssdk.protocol.reflect.ShapeModelReflector; + +public class UnmarshalledErrorAssertion extends UnmarshallingAssertion { + private final JsonNode expectedError; + + public UnmarshalledErrorAssertion(JsonNode expectedError) { + this.expectedError = expectedError; + } + + @Override + protected void doAssert(UnmarshallingTestContext context, Object actual) throws Exception { + if (!(actual instanceof SdkServiceException)) { + fail("Expected unmarshalled object to be an instance of SdkServiceException"); + } + SdkServiceException actualException = (SdkServiceException) actual; + SdkServiceException expectedException = createExpectedResult(context); + for (Field field : expectedException.getClass().getDeclaredFields()) { + assertFieldEquals(field, actualException, expectedException); + } + + if (expectedException.getMessage() != null) { + Assert.assertTrue(actualException.getMessage().startsWith(expectedException.getMessage())); + } + } + + private SdkServiceException createExpectedResult(UnmarshallingTestContext context) { + return (SdkServiceException) new ShapeModelReflector(context.getModel(), context.getErrorName() + "Exception", + this.expectedError).createShapeObject(); + } + + private void assertFieldEquals(Field field, Object actual, Object expectedResult) throws + Exception { + field.setAccessible(true); + assertReflectionEquals(field.get(expectedResult), field.get(actual)); + } +} \ No newline at end of file diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshallingTestContext.java 
b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshallingTestContext.java index ee5d1e22987e..7ad69dd7828d 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshallingTestContext.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/asserts/unmarshalling/UnmarshallingTestContext.java @@ -26,6 +26,7 @@ public class UnmarshallingTestContext { private IntermediateModel model; private String operationName; private String streamedResponse; + private String errorName; public UnmarshallingTestContext withModel(IntermediateModel model) { this.model = model; @@ -58,4 +59,12 @@ public String getStreamedResponse() { return streamedResponse; } + public UnmarshallingTestContext withErrorName(String errorName) { + this.errorName = errorName; + return this; + } + + public String getErrorName() { + return errorName; + } } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/Then.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/Then.java index 7d7e934cf030..0f6bc1268bd1 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/Then.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/Then.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.databind.JsonNode; import software.amazon.awssdk.protocol.asserts.marshalling.MarshallingAssertion; import software.amazon.awssdk.protocol.asserts.marshalling.SerializedAs; +import software.amazon.awssdk.protocol.asserts.unmarshalling.UnmarshalledErrorAssertion; import software.amazon.awssdk.protocol.asserts.unmarshalling.UnmarshalledResultAssertion; import software.amazon.awssdk.protocol.asserts.unmarshalling.UnmarshallingAssertion; @@ -27,12 +28,14 @@ public class Then { private final MarshallingAssertion serializedAs; private final UnmarshallingAssertion deserializedAs; + 
private final UnmarshallingAssertion errorDeserializedAs; @JsonCreator public Then(@JsonProperty("serializedAs") SerializedAs serializedAs, @JsonProperty("deserializedAs") JsonNode deserializedAs) { this.serializedAs = serializedAs; this.deserializedAs = new UnmarshalledResultAssertion(deserializedAs); + this.errorDeserializedAs = new UnmarshalledErrorAssertion(deserializedAs); } /** @@ -49,4 +52,11 @@ public UnmarshallingAssertion getUnmarshallingAssertion() { return deserializedAs; } + /** + * + * @return The assertion object to use for error unmarshalling tests + */ + public UnmarshallingAssertion getErrorUnmarshallingAssertion() { + return errorDeserializedAs; + } } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/When.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/When.java index 9611a8ba42fa..1db3442a364c 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/When.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/When.java @@ -26,6 +26,9 @@ public class When { @JsonProperty(value = "operation") private String operationName; + @JsonProperty(value = "error") + private String errorName; + public WhenAction getAction() { return action; } @@ -41,4 +44,12 @@ public String getOperationName() { public void setOperationName(String operationName) { this.operationName = operationName; } + + public void setErrorName(String errorName) { + this.errorName = errorName; + } + + public String getErrorName() { + return errorName; + } } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/WhenAction.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/WhenAction.java index 0304b2e2a741..cf6317d4700c 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/WhenAction.java +++ 
b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/model/WhenAction.java @@ -17,7 +17,8 @@ public enum WhenAction { MARSHALL("marshall"), - UNMARSHALL("unmarshall"); + UNMARSHALL("unmarshall"), + ERROR_UNMARSHALL("errorUnmarshall"); private final String action; @@ -31,6 +32,8 @@ public static WhenAction fromValue(String action) { return MARSHALL; case "unmarshall": return UNMARSHALL; + case "errorUnmarshall": + return ERROR_UNMARSHALL; default: throw new IllegalArgumentException("Unsupported test action " + action); } diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/ShapeModelReflector.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/ShapeModelReflector.java index 377f7b4e0863..c39a12ebf0e8 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/ShapeModelReflector.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/reflect/ShapeModelReflector.java @@ -110,6 +110,14 @@ private void initializeFields(ShapeModel structureShape, JsonNode input, Iterator fieldNames = input.fieldNames(); while (fieldNames.hasNext()) { String memberName = fieldNames.next(); + // error structures have special case handling of "message" + if (structureShape.getErrorCode() != null && memberName.equalsIgnoreCase("message")) { + Method setter = shapeObject.getClass().getMethod("message", String.class); + setter.setAccessible(true); + setter.invoke(shapeObject, input.get(memberName).asText()); + continue; + } + MemberModel memberModel = structureShape.getMemberByC2jName(memberName); if (memberModel == null) { throw new IllegalArgumentException("Member " + memberName + " was not found in the " + diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/ProtocolTestRunner.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/ProtocolTestRunner.java index 
dde66e09e251..6c86766b1796 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/ProtocolTestRunner.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/ProtocolTestRunner.java @@ -72,6 +72,7 @@ public void runTest(TestCase testCase) throws Exception { marshallingTestRunner.runTest(testCase); break; case UNMARSHALL: + case ERROR_UNMARSHALL: unmarshallingTestRunner.runTest(testCase); break; default: diff --git a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java index 6ee3b88cb010..967b4823bd90 100644 --- a/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java +++ b/test/protocol-tests-core/src/main/java/software/amazon/awssdk/protocol/runners/UnmarshallingTestRunner.java @@ -23,6 +23,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.github.tomakehurst.wiremock.client.ResponseDefinitionBuilder; import com.github.tomakehurst.wiremock.client.WireMock; +import java.lang.reflect.InvocationTargetException; import java.util.Base64; import software.amazon.awssdk.codegen.model.intermediate.IntermediateModel; import software.amazon.awssdk.codegen.model.intermediate.Metadata; @@ -52,6 +53,21 @@ class UnmarshallingTestRunner { void runTest(TestCase testCase) throws Exception { resetWireMock(testCase.getGiven().getResponse()); + + switch (testCase.getWhen().getAction()) { + case UNMARSHALL: + runUnmarshallTest(testCase); + break; + case ERROR_UNMARSHALL: + runErrorUnmarshallTest(testCase); + break; + default: + throw new IllegalArgumentException("UnmarshallingTestRunner unable to run test case for action " + + testCase.getWhen().getAction()); + } + } + + private void runUnmarshallTest(TestCase testCase) throws Exception { String operationName = 
testCase.getWhen().getOperationName(); ShapeModelReflector shapeModelReflector = createShapeModelReflector(testCase); if (!hasStreamingMember(operationName)) { @@ -60,12 +76,32 @@ void runTest(TestCase testCase) throws Exception { } else { CapturingResponseTransformer responseHandler = new CapturingResponseTransformer(); Object actualResult = clientReflector - .invokeStreamingMethod(testCase, shapeModelReflector.createShapeObject(), responseHandler); + .invokeStreamingMethod(testCase, shapeModelReflector.createShapeObject(), responseHandler); testCase.getThen().getUnmarshallingAssertion() .assertMatches(createContext(operationName, responseHandler.captured), actualResult); } } + private void runErrorUnmarshallTest(TestCase testCase) throws Exception { + String operationName = testCase.getWhen().getOperationName(); + ShapeModelReflector shapeModelReflector = createShapeModelReflector(testCase); + try { + clientReflector.invokeMethod(testCase, shapeModelReflector.createShapeObject()); + throw new IllegalStateException("Test case expected client to throw error"); + } catch (InvocationTargetException t) { + String errorName = testCase.getWhen().getErrorName(); + testCase.getThen().getErrorUnmarshallingAssertion().assertMatches( + createErrorContext(operationName, errorName), t.getCause()); + } + } + + private UnmarshallingTestContext createErrorContext(String operationName, String errorName) { + return new UnmarshallingTestContext() + .withModel(model) + .withOperationName(operationName) + .withErrorName(errorName); + } + /** * {@link ResponseTransformer} that simply captures all the content as a String so we * can compare it with the expected in diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json index d660230cf715..357a6605be5c 100644 --- 
a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/rest-json-output.json @@ -19,6 +19,24 @@ } } }, + { + "description": "Operation with explicit payload structure, with emtpy output is unmarshalled as null value", + "given": { + "response": { + "status_code": 200, + "body": "" + } + }, + "when": { + "action": "unmarshall", + "operation": "OperationWithExplicitPayloadStructure" + }, + "then": { + "deserializedAs": { + "PayloadMember": null + } + } + }, { "description": "Operation with streaming payload in output is unmarshalled correctly", "given": { diff --git a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/smithy-rpcv2-output.json b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/smithy-rpcv2-output.json index bc82842c02d5..c7523d9776f6 100644 --- a/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/smithy-rpcv2-output.json +++ b/test/protocol-tests-core/src/main/resources/software/amazon/awssdk/protocol/suites/cases/smithy-rpcv2-output.json @@ -663,5 +663,76 @@ "then": { "deserializedAs": {} } + }, + { + "description": "Parses simple RpcV2 Cbor errors.", + "given": { + "response": { + "status_code": 400, + "headers": { + "smithy-protocol": "rpc-v2-cbor", + "Content-Type": "application/cbor" + }, + "binaryBody": "v2ZfX3R5cGV4LnNtaXRoeS5wcm90b2NvbHRlc3RzLnJwY3YyQ2JvciNJbnZhbGlkR3JlZXRpbmdnTWVzc2FnZWJIaf8=" + } + }, + "when": { + "action": "errorUnmarshall", + "operation": "GreetingWithErrors", + "error": "InvalidGreeting" + }, + "then": { + "deserializedAs": { + "Message": "Hi" + } + } + }, + { + "description": "Parses a complex error with no message member", + "given": { + "response": { + "status_code": 400, + "headers": { + "smithy-protocol": "rpc-v2-cbor", + "Content-Type": 
"application/cbor" + }, + "binaryBody": "v2ZfX3R5cGV4K3NtaXRoeS5wcm90b2NvbHRlc3RzLnJwY3YyQ2JvciNDb21wbGV4RXJyb3JoVG9wTGV2ZWxpVG9wIGxldmVsZk5lc3RlZL9jRm9vY2Jhcv//" + } + }, + "when": { + "action": "errorUnmarshall", + "operation": "GreetingWithErrors", + "error": "ComplexError" + }, + "then": { + "deserializedAs": { + "TopLevel": "Top level", + "Nested": { + "Foo": "bar" + } + } + } + }, + { + "description": "Parses an empty complex error", + "given": { + "response": { + "status_code": 400, + "headers": { + "smithy-protocol": "rpc-v2-cbor", + "Content-Type": "application/cbor" + }, + "binaryBody": "v2ZfX3R5cGV4K3NtaXRoeS5wcm90b2NvbHRlc3RzLnJwY3YyQ2JvciNDb21wbGV4RXJyb3L/" + } + }, + "when": { + "action": "errorUnmarshall", + "operation": "GreetingWithErrors", + "error": "ComplexError" + }, + "then": { + "deserializedAs": { + } + } } ] \ No newline at end of file diff --git a/test/protocol-tests/pom.xml b/test/protocol-tests/pom.xml index fa7dc21cffb0..c9c1bb54f0d0 100644 --- a/test/protocol-tests/pom.xml +++ b/test/protocol-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/protocol-tests/src/main/resources/codegen-resources/sdkrpcv2/service-2.json b/test/protocol-tests/src/main/resources/codegen-resources/sdkrpcv2/service-2.json index 62357f2cf5f7..4d3c2ea4a0d7 100644 --- a/test/protocol-tests/src/main/resources/codegen-resources/sdkrpcv2/service-2.json +++ b/test/protocol-tests/src/main/resources/codegen-resources/sdkrpcv2/service-2.json @@ -95,6 +95,19 @@ "method": "POST", "requestUri": "/" } + }, + "GreetingWithErrors":{ + "name":"GreetingWithErrors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "output":{"shape":"GreetingWithErrorsOutput"}, + "errors":[ + {"shape":"ComplexError"}, + {"shape":"InvalidGreeting"} + ], + "idempotent":true } }, "shapes": { @@ -679,6 +692,33 @@ "shape": "AllTypesUnionStructure" } } + }, + "GreetingWithErrorsOutput":{ + 
"type":"structure", + "members":{ + "greeting":{"shape":"String"} + } + }, + "ComplexError":{ + "type":"structure", + "members":{ + "TopLevel":{"shape":"String"}, + "Nested":{"shape":"ComplexNestedErrorData"} + }, + "exception":true + }, + "ComplexNestedErrorData":{ + "type":"structure", + "members":{ + "Foo":{"shape":"String"} + } + }, + "InvalidGreeting":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "exception":true } } } diff --git a/test/region-testing/pom.xml b/test/region-testing/pom.xml index 0158560c089e..b5228ddb73f0 100644 --- a/test/region-testing/pom.xml +++ b/test/region-testing/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/ruleset-testing-core/pom.xml b/test/ruleset-testing-core/pom.xml index e081fc788177..fd91185ff41d 100644 --- a/test/ruleset-testing-core/pom.xml +++ b/test/ruleset-testing-core/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-benchmarks/pom.xml b/test/s3-benchmarks/pom.xml index 6ae40c113058..1481492ba648 100644 --- a/test/s3-benchmarks/pom.xml +++ b/test/s3-benchmarks/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/s3-tests/README.md b/test/s3-tests/README.md new file mode 100644 index 000000000000..6ffc6f51f20e --- /dev/null +++ b/test/s3-tests/README.md @@ -0,0 +1,31 @@ +# SDK Regression Tests for Amazon S3 + +## Description +This module contains SDK regression tests for Amazon S3 streaming operations with various SDK configurations. + + +## How to run + +### Credentials + +The tests require valid AWS credentials to be available in the default credential file under the `aws-test-account` profile. + +### Run the tests + +- Run from your IDE + +- Run from maven command. 
Include the class you want to run with the `regression.test` property + +``` +mvn clean install -P s3-regression-tests -pl :s3-tests -am -T1C -Dregression.test=DownloadStreamingRegressionTesting +``` + +## Adding New Tests + +- The tests are built using [JUnit 5](https://junit.org/junit5/). Make sure you are using the correct APIs and mixing of + Junit 4 and Junit 5 APIs on the same test can have unexpected results. + +- All tests should have the suffix of `RegressionTesting`, eg: `DownloadStreamingRegressionTesting` + + + diff --git a/test/s3-tests/pom.xml b/test/s3-tests/pom.xml index 28af511b9e17..f1935490ccff 100644 --- a/test/s3-tests/pom.xml +++ b/test/s3-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 @@ -81,6 +81,7 @@ software.amazon.awssdk s3 ${awsjavasdk.version} + test
        software.amazon.awssdk @@ -115,11 +116,6 @@ junit-jupiter test - - org.junit.vintage - junit-vintage-engine - test - org.apache.logging.log4j log4j-api @@ -152,6 +148,18 @@ ${awsjavasdk.version} test + + software.amazon.awssdk + http-auth-aws-crt + ${awsjavasdk.version} + test + + + software.amazon.awssdk + s3-transfer-manager + ${awsjavasdk.version} + test + org.eclipse.jetty jetty-servlet @@ -164,7 +172,13 @@ software.amazon.awssdk - bundle-sdk + test-utils + ${project.version} + test + + + software.amazon.awssdk + apache-client ${project.version} test diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/checksum/ChecksumIntegrationTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/checksum/ChecksumIntegrationTesting.java deleted file mode 100644 index ab927aa9eda0..000000000000 --- a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/checksum/ChecksumIntegrationTesting.java +++ /dev/null @@ -1,1061 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.awssdk.services.s3.checksum; - -import static org.assertj.core.api.Assertions.assertThat; - -import io.reactivex.Flowable; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assumptions; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; -import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; -import software.amazon.awssdk.awscore.AwsClient; -import software.amazon.awssdk.awscore.exception.AwsErrorDetails; -import software.amazon.awssdk.checksums.DefaultChecksumAlgorithm; -import software.amazon.awssdk.checksums.SdkChecksum; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; -import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; -import software.amazon.awssdk.core.interceptor.Context; -import software.amazon.awssdk.core.interceptor.ExecutionAttributes; -import 
software.amazon.awssdk.core.interceptor.ExecutionInterceptor; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpMethod; -import software.amazon.awssdk.http.SdkHttpRequest; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.BucketAccelerateStatus; -import software.amazon.awssdk.services.s3.model.BucketLocationConstraint; -import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; -import software.amazon.awssdk.services.s3.model.CreateBucketRequest; -import software.amazon.awssdk.services.s3.model.DataRedundancy; -import software.amazon.awssdk.services.s3.model.Delete; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.GlacierJobParameters; -import software.amazon.awssdk.services.s3.model.LocationInfo; -import software.amazon.awssdk.services.s3.model.LocationType; -import software.amazon.awssdk.services.s3.model.ObjectIdentifier; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.PutObjectResponse; -import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; -import software.amazon.awssdk.services.s3.model.RestoreRequest; -import software.amazon.awssdk.services.s3.model.S3Exception; -import software.amazon.awssdk.services.s3.model.StorageClass; -import software.amazon.awssdk.services.s3.model.Tier; -import software.amazon.awssdk.services.s3control.S3ControlClient; -import software.amazon.awssdk.services.s3control.model.CreateMultiRegionAccessPointRequest; -import software.amazon.awssdk.services.s3control.model.GetMultiRegionAccessPointResponse; -import software.amazon.awssdk.services.s3control.model.MultiRegionAccessPointStatus; -import software.amazon.awssdk.services.s3control.model.S3ControlException; -import 
software.amazon.awssdk.services.sts.StsClient; -import software.amazon.awssdk.utils.BinaryUtils; -import software.amazon.awssdk.utils.CompletableFutureUtils; -import software.amazon.awssdk.utils.FunctionalUtils; -import software.amazon.awssdk.utils.Logger; -import software.amazon.awssdk.crt.Log; - -public class ChecksumIntegrationTesting { - private static final String BUCKET_NAME_PREFIX = "do-not-delete-checksums-"; - private static final String MRAP_NAME = "do-not-delete-checksum-testing"; - private static final String AP_NAME = "do-not-delete-checksum-testing-ap"; - private static final String EOZ_SUFFIX = "--usw2-az3--x-s3"; - - private static final Logger LOG = Logger.loggerFor(ChecksumIntegrationTesting.class); - private static final Region REGION = Region.US_WEST_2; - private static final String TEST_CREDENTIALS_PROFILE_NAME = "aws-test-account"; - - - public static final AwsCredentialsProviderChain CREDENTIALS_PROVIDER_CHAIN = - AwsCredentialsProviderChain.of(ProfileCredentialsProvider.builder() - .profileName(TEST_CREDENTIALS_PROFILE_NAME) - .build(), - DefaultCredentialsProvider.create()); - - private static final SdkChecksum CRC32 = SdkChecksum.forAlgorithm(DefaultChecksumAlgorithm.CRC32); - - private static final ExecutorService ASYNC_REQUEST_BODY_EXECUTOR = Executors.newSingleThreadExecutor(); - - private static String accountId; - private static String bucketName; - private static String mrapArn; - private static String eozBucket; - private static String apArn; - - private static S3ControlClient s3Control; - private static S3Client s3; - private static StsClient sts; - - private static Path testFile; - - private Map> bucketCleanup = new HashMap<>(); - - @BeforeAll - static void setup() throws InterruptedException, IOException { - // Log.initLoggingToStdout(Log.LogLevel.Trace); - - s3 = S3Client.builder() - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .region(REGION) - .build(); - - s3Control = S3ControlClient.builder() - 
.credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .region(REGION) - .build(); - - sts = StsClient.builder().credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .region(REGION) - .build(); - - accountId = getAccountId(); - - bucketName = createBucket(); - - mrapArn = createMrap(); - - eozBucket = createEozBucket(); - - apArn = createAccessPoint(); - - testFile = createRandomFile(); - } - - @AfterEach - public void methodCleanup() { - bucketCleanup.forEach((bt, keys) -> { - String bucket = bucketForType(bt); - keys.forEach(k -> s3.deleteObject(r -> r.bucket(bucket).key(k))); - }); - - bucketCleanup.clear(); - } - - @AfterAll - public static void cleanup() { - ASYNC_REQUEST_BODY_EXECUTOR.shutdownNow(); - } - - private void assumeNotAccessPointWithPathStyle(TestConfig config) { - BucketType bucketType = config.getBucketType(); - Assumptions.assumeFalse(config.isForcePathStyle() && bucketType.isArnType(), - "Path style doesn't work with ARN type buckets"); - } - - private void assumeNotAccelerateWithPathStyle(TestConfig config) { - Assumptions.assumeFalse(config.isForcePathStyle() && config.isAccelerateEnabled(), - "Path style doesn't work with Accelerate"); - } - - private void assumeNotAccelerateWithArnType(TestConfig config) { - Assumptions.assumeFalse(config.isAccelerateEnabled() && config.getBucketType().isArnType(), - "Accelerate doesn't work with ARN buckets"); - } - - private void assumeNotAccelerateWithEoz(TestConfig config) { - Assumptions.assumeFalse(config.isAccelerateEnabled() && config.getBucketType() == BucketType.EOZ, - "Accelerate is not supported with Express One Zone"); - } - - // Request checksum required - @ParameterizedTest - @MethodSource("testConfigs") - void deleteObject(TestConfig config) throws Exception { - assumeNotAccessPointWithPathStyle(config); - assumeNotAccelerateWithPathStyle(config); - assumeNotAccelerateWithArnType(config); - assumeNotAccelerateWithEoz(config); - - String bucket = bucketForType(config.getBucketType()); - String key = 
putRandomObject(config.getBucketType()); - TestCallable callable = null; - try { - DeleteObjectsRequest req = DeleteObjectsRequest.builder() - .bucket(bucket) - .delete(Delete.builder() - .objects(ObjectIdentifier.builder() - .key(key) - .build()) - .build()) - .build(); - - callable = callDeleteObjects(req, config); - callable.runnable.call(); - } finally { - if (callable != null) { - callable.client.close(); - } - } - } - - // Request checksum optional - @ParameterizedTest - @MethodSource("testConfigs") - void restoreObject(TestConfig config) throws Exception { - assumeNotAccessPointWithPathStyle(config); - assumeNotAccelerateWithPathStyle(config); - assumeNotAccelerateWithArnType(config); - - Assumptions.assumeFalse(config.getBucketType() == BucketType.EOZ, - "Restore is not supported for S3 Express"); - - String bucket = bucketForType(config.getBucketType()); - String key = putRandomArchivedObject(config.getBucketType()); - TestCallable callable = null; - try { - RestoreObjectRequest request = RestoreObjectRequest.builder() - .bucket(bucket) - .key(key) - .restoreRequest(RestoreRequest.builder() - .days(5) - .glacierJobParameters(GlacierJobParameters.builder() - .tier(Tier.STANDARD) - .build()) - .build()) - .build(); - - callable = callRestoreObject(request, config); - callable.runnable.call(); - } finally { - if (callable != null) { - callable.client.close(); - } - } - } - - @ParameterizedTest - @MethodSource("uploadConfigs") - void putObject(UploadConfig config) throws Exception { - assumeNotAccelerateWithPathStyle(config.getBaseConfig()); - assumeNotAccessPointWithPathStyle(config.getBaseConfig()); - assumeNotAccelerateWithArnType(config.getBaseConfig()); - assumeNotAccelerateWithEoz(config.getBaseConfig()); - - // For testing purposes, ContentProvider is Publisher for async clients - // There is no way to create AsyncRequestBody with a Publisher and also provide the content length - Assumptions.assumeFalse(config.getBodyType() == 
BodyType.CONTENT_PROVIDER_WITH_LENGTH - && config.getBaseConfig().getFlavor().isAsync(), - "No way to create AsyncRequestBody by giving both an Publisher and the content length"); - - // Payload signing doesn't work correctly for async java based - Assumptions.assumeFalse(config.getBaseConfig().getFlavor() == S3ClientFlavor.ASYNC_JAVA_BASED - && (config.getBaseConfig().isPayloadSigning() - // MRAP requires body signing - || config.getBaseConfig().getBucketType() == BucketType.MRAP), - "Async payload signing doesn't work with Java based clients"); - - // For testing purposes, ContentProvider is Publisher for async clients - // Async java based clients don't currently support unknown content-length bodies - Assumptions.assumeFalse(config.getBaseConfig().getFlavor() == S3ClientFlavor.ASYNC_JAVA_BASED - && config.getBodyType() == BodyType.CONTENT_PROVIDER_NO_LENGTH, - "Async Java based support unknown content length"); - - BucketType bucketType = config.getBaseConfig().getBucketType(); - - String bucket = bucketForType(bucketType); - String key = randomKey(); - - PutObjectRequest request = PutObjectRequest.builder() - .bucket(bucket) - .key(key) - .build(); - - - RequestRecorder recorder = new RequestRecorder(); - - ClientOverrideConfiguration.Builder overrideConfiguration = - ClientOverrideConfiguration.builder() - .addExecutionInterceptor(recorder); - - if (config.getBaseConfig().isPayloadSigning()) { - overrideConfiguration.addExecutionInterceptor(new EnablePayloadSigningInterceptor()); - } - - TestCallable callable = null; - try { - - Long actualContentLength = null; - boolean requestBodyHasContentLength = false; - String actualCrc32; - - if (!config.getBaseConfig().getFlavor().isAsync()) { - TestRequestBody body = getRequestBody(config.getBodyType()); - callable = callPutObject(request, body, config.getBaseConfig(), overrideConfiguration.build()); - actualContentLength = body.getActualContentLength(); - requestBodyHasContentLength = 
body.optionalContentLength().isPresent(); - actualCrc32 = body.getChecksum(); - } else { - TestAsyncBody body = getAsyncRequestBody(config.getBodyType()); - callable = callPutObject(request, body, config.getBaseConfig(), overrideConfiguration.build()); - actualContentLength = body.getActualContentLength(); - requestBodyHasContentLength = body.getAsyncRequestBody().contentLength().isPresent(); - actualCrc32 = body.getChecksum(); - } - - PutObjectResponse response = callable.runnable.call(); - - recordObjectToCleanup(bucketType, key); - - // We only validate when configured to WHEN_SUPPORTED since checksums are optional for PutObject - if (config.getBaseConfig().getRequestChecksumValidation() == RequestChecksumCalculation.WHEN_SUPPORTED - // CRT switches to MPU under the hood which doesn't support checksums - && config.getBaseConfig().getFlavor() != S3ClientFlavor.ASYNC_CRT) { - assertThat(response.checksumCRC32()).isEqualTo(actualCrc32); - } - - // We can't set an execution interceptor when using CRT - if (config.getBaseConfig().getFlavor() != S3ClientFlavor.ASYNC_CRT) { - assertThat(recorder.getRequests()).isNotEmpty(); - - for (SdkHttpRequest httpRequest : recorder.getRequests()) { - // skip any non-PUT requests, e.g. 
GetSession for EOZ requests - if (httpRequest.method() != SdkHttpMethod.PUT) { - continue; - } - - String payloadSha = httpRequest.firstMatchingHeader("x-amz-content-sha256").get(); - if (payloadSha.startsWith("STREAMING")) { - String decodedContentLength = httpRequest.firstMatchingHeader("x-amz-decoded-content-length").get(); - assertThat(Long.parseLong(decodedContentLength)).isEqualTo(actualContentLength); - } else { - Optional contentLength = httpRequest.firstMatchingHeader("Content-Length"); - if (requestBodyHasContentLength) { - assertThat(Long.parseLong(contentLength.get())).isEqualTo(actualContentLength); - } - } - } - } - } finally { - if (callable != null) { - callable.client.close(); - } - } - } - - private TestCallable callDeleteObjects(DeleteObjectsRequest request, TestConfig config) { - AwsClient toClose; - Callable runnable = null; - - if (config.getFlavor().isAsync()) { - S3AsyncClient s3Async = makeAsyncClient(config, null); - toClose = s3Async; - runnable = () -> { - CompletableFutureUtils.joinLikeSync(s3Async.deleteObjects(request)); - return null; - }; - } else { - S3Client s3 = makeSyncClient(config, null); - toClose = s3; - runnable = () -> { - s3.deleteObjects(request); - return null; - }; - } - - return new TestCallable<>(toClose, runnable); - } - - private TestCallable callRestoreObject(RestoreObjectRequest request, TestConfig config) { - AwsClient toClose; - Callable callable = null; - - if (config.getFlavor().isAsync()) { - S3AsyncClient s3Async = makeAsyncClient(config, null); - toClose = s3Async; - callable = () -> { - s3Async.restoreObject(request).join(); - return null; - }; - } else { - S3Client s3 = makeSyncClient(config, null); - toClose = s3; - callable = () -> { - s3.restoreObject(request); - return null; - }; - } - - return new TestCallable<>(toClose, callable); - } - - private TestCallable callPutObject(PutObjectRequest request, TestRequestBody requestBody, TestConfig config, - ClientOverrideConfiguration overrideConfiguration) 
throws IOException { - S3Client s3Client = makeSyncClient(config, overrideConfiguration); - Callable callable = () -> { - try { - return s3Client.putObject(request, requestBody); - } catch (Exception e) { - throw new RuntimeException(e); - } - }; - return new TestCallable<>(s3Client, callable); - } - - private TestCallable callPutObject(PutObjectRequest request, TestAsyncBody requestBody, TestConfig config, - ClientOverrideConfiguration overrideConfiguration) throws IOException { - S3AsyncClient s3Client = makeAsyncClient(config, overrideConfiguration); - Callable callable = () -> { - try { - CompletableFuture future = s3Client.putObject(request, requestBody.getAsyncRequestBody()); - return CompletableFutureUtils.joinLikeSync(future); - } catch (Exception e) { - throw new RuntimeException(e); - } - }; - return new TestCallable<>(s3Client, callable); - } - - private static class TestCallable { - private AwsClient client; - private Callable runnable; - - TestCallable(AwsClient client, Callable runnable) { - this.client = client; - this.runnable = runnable; - } - } - - private S3Client makeSyncClient(TestConfig config, ClientOverrideConfiguration overrideConfiguration) { - switch (config.getFlavor()) { - case JAVA_BASED: - return S3Client.builder() - .forcePathStyle(config.isForcePathStyle()) - .requestChecksumCalculation(config.getRequestChecksumValidation()) - .region(REGION) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .accelerate(config.isAccelerateEnabled()) - .overrideConfiguration(overrideConfiguration) - .build(); - default: - throw new RuntimeException("Unsupported sync flavor: " + config.getFlavor()); - } - } - - private S3AsyncClient makeAsyncClient(TestConfig config, ClientOverrideConfiguration overrideConfiguration) { - switch (config.getFlavor()) { - case ASYNC_JAVA_BASED: - return S3AsyncClient.builder() - .forcePathStyle(config.isForcePathStyle()) - .requestChecksumCalculation(config.getRequestChecksumValidation()) - .region(REGION) - 
.credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .accelerate(config.isAccelerateEnabled()) - .overrideConfiguration(overrideConfiguration) - .build(); - case ASYNC_CRT: { - if (overrideConfiguration != null) { - LOG.warn(() -> "Override configuration cannot be set for Async S3 CRT!"); - } - return S3AsyncClient.crtBuilder() - .forcePathStyle(config.isForcePathStyle()) - .requestChecksumCalculation(config.getRequestChecksumValidation()) - .region(REGION) - .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) - .accelerate(config.isAccelerateEnabled()) - .build(); - } - default: - throw new RuntimeException("Unsupported async flavor: " + config.getFlavor()); - } - } - - private static String bucketForType(BucketType type) { - switch (type) { - case STANDARD_BUCKET: - return bucketName; - case MRAP: - return mrapArn; - case EOZ: - return eozBucket; - case ACCESS_POINT: - return apArn; - default: - throw new RuntimeException("Unknown bucket type: " + type); - } - } - - enum BucketType { - STANDARD_BUCKET(false), - ACCESS_POINT(true), - // Multi-region access point - MRAP(true), - // Express one zone/S3 express - EOZ(false), - ; - - private boolean arnType; - - private BucketType(boolean arnType) { - this.arnType = arnType; - } - - public boolean isArnType() { - return arnType; - } - } - - enum S3ClientFlavor { - JAVA_BASED(false), - ASYNC_JAVA_BASED(true), - - ASYNC_CRT(true) - ; - - private boolean async; - - private S3ClientFlavor(boolean async) { - this.async = async; - } - - public boolean isAsync() { - return async; - } - } - - static class UploadConfig { - private TestConfig baseConfig; - private BodyType bodyType; - - public TestConfig getBaseConfig() { - return baseConfig; - } - - public void setBaseConfig(TestConfig baseConfig) { - this.baseConfig = baseConfig; - } - - public BodyType getBodyType() { - return bodyType; - } - - public void setBodyType(BodyType bodyType) { - this.bodyType = bodyType; - } - - @Override - public String toString() { - return 
"UploadConfig{" + - "baseConfig=" + baseConfig + - ", bodyType=" + bodyType + - '}'; - } - } - - static class TestRequestBody extends RequestBody { - private final long contentLength; - private final String checksum; - - protected TestRequestBody(RequestBody wrapped, long contentLength, String checksum) { - super(wrapped.contentStreamProvider(), wrapped.optionalContentLength().orElse(null), wrapped.contentType()); - this.contentLength = contentLength; - this.checksum = checksum; - } - - public long getActualContentLength() { - return contentLength; - } - - public String getChecksum() { - return checksum; - } - } - - private static class TestAsyncBody { - private final AsyncRequestBody asyncRequestBody; - private final long actualContentLength; - private final String checksum; - - private TestAsyncBody(AsyncRequestBody asyncRequestBody, long actualContentLength, String checksum) { - this.asyncRequestBody = asyncRequestBody; - this.actualContentLength = actualContentLength; - this.checksum = checksum; - } - - public AsyncRequestBody getAsyncRequestBody() { - return asyncRequestBody; - } - - public long getActualContentLength() { - return actualContentLength; - } - - public String getChecksum() { - return checksum; - } - - } - - static class TestConfig { - private S3ClientFlavor flavor; - private BucketType bucketType; - private boolean forcePathStyle; - private RequestChecksumCalculation requestChecksumValidation; - private boolean accelerateEnabled; - private boolean payloadSigning; - - public S3ClientFlavor getFlavor() { - return flavor; - } - - public void setFlavor(S3ClientFlavor flavor) { - this.flavor = flavor; - } - - public BucketType getBucketType() { - return bucketType; - } - - public void setBucketType(BucketType bucketType) { - this.bucketType = bucketType; - } - - public boolean isForcePathStyle() { - return forcePathStyle; - } - - public void setForcePathStyle(boolean forcePathStyle) { - this.forcePathStyle = forcePathStyle; - } - - public 
RequestChecksumCalculation getRequestChecksumValidation() { - return requestChecksumValidation; - } - - public void setRequestChecksumValidation(RequestChecksumCalculation requestChecksumValidation) { - this.requestChecksumValidation = requestChecksumValidation; - } - - public boolean isAccelerateEnabled() { - return accelerateEnabled; - } - - public void setAccelerateEnabled(boolean accelerateEnabled) { - this.accelerateEnabled = accelerateEnabled; - } - - public boolean isPayloadSigning() { - return payloadSigning; - } - - public void setPayloadSigning(boolean payloadSigning) { - this.payloadSigning = payloadSigning; - } - - @Override - public String toString() { - return "[" + - "flavor=" + flavor + - ", bucketType=" + bucketType + - ", forcePathStyle=" + forcePathStyle + - ", requestChecksumValidation=" + requestChecksumValidation + - ", accelerateEnabled=" + accelerateEnabled + - ", payloadSigning=" + payloadSigning + - ']'; - } - } - - static List testConfigs() { - List configs = new ArrayList<>(); - - boolean[] forcePathStyle = {true, false}; - RequestChecksumCalculation[] checksumValidations = {RequestChecksumCalculation.WHEN_REQUIRED, - RequestChecksumCalculation.WHEN_SUPPORTED}; - boolean[] accelerateEnabled = {true, false}; - boolean[] payloadSigningEnabled = {true, false}; - for (boolean pathStyle : forcePathStyle) { - for (RequestChecksumCalculation checksumValidation : checksumValidations) { - for (S3ClientFlavor flavor : S3ClientFlavor.values()) { - for (BucketType bucketType : BucketType.values()) { - for (boolean accelerate : accelerateEnabled) { - for (boolean payloadSigning : payloadSigningEnabled) { - TestConfig testConfig = new TestConfig(); - testConfig.setFlavor(flavor); - testConfig.setBucketType(bucketType); - testConfig.setForcePathStyle(pathStyle); - testConfig.setRequestChecksumValidation(checksumValidation); - testConfig.setAccelerateEnabled(accelerate); - testConfig.setPayloadSigning(payloadSigning); - configs.add(testConfig); - } - } 
- } - } - } - } - - return configs; - } - - enum BodyType { - INPUTSTREAM_RESETABLE, - INPUTSTREAM_NOT_RESETABLE, - - STRING, - - FILE, - - CONTENT_PROVIDER_WITH_LENGTH, - - CONTENT_PROVIDER_NO_LENGTH - } - - private static List uploadConfigs() { - List configs = new ArrayList<>(); - - for (BodyType bodyType : BodyType.values()) { - for (TestConfig baseConfig : testConfigs()) { - UploadConfig config = new UploadConfig(); - config.setBaseConfig(baseConfig); - config.setBodyType(bodyType); - configs.add(config); - } - } - return configs; - } - - private String putRandomObject(BucketType bucketType) { - String key = randomKey(); - String bucketName = bucketForType(bucketType); - s3.putObject(r -> r.bucket(bucketName).key(key), RequestBody.fromString("hello")); - recordObjectToCleanup(bucketType, key); - return key; - } - - - private String putRandomArchivedObject(BucketType bucketType) { - String key = randomKey(); - String bucketName = bucketForType(bucketType); - s3.putObject(r -> r.bucket(bucketName).key(key).storageClass(StorageClass.GLACIER), RequestBody.fromString("hello")); - recordObjectToCleanup(bucketType, key); - return key; - } - - private TestRequestBody getRequestBody(BodyType bodyType) throws IOException { - switch (bodyType) { - case STRING: { - String content = "Hello world"; - long contentLength = content.getBytes(StandardCharsets.UTF_8).length; - return new TestRequestBody(RequestBody.fromString("Hello world"), contentLength, crc32(content)); - } - case FILE: - return new TestRequestBody(RequestBody.fromFile(testFile), Files.size(testFile), crc32(testFile)); - case CONTENT_PROVIDER_NO_LENGTH: { - RequestBody wrapped = - RequestBody.fromContentProvider(() -> FunctionalUtils.invokeSafely(() -> Files.newInputStream(testFile)), - "application/octet-stream"); - - return new TestRequestBody(wrapped, Files.size(testFile), crc32(testFile)); - } - case CONTENT_PROVIDER_WITH_LENGTH: { - long contentLength = Files.size(testFile); - RequestBody wrapped = 
RequestBody.fromContentProvider(() -> FunctionalUtils.invokeSafely(() -> Files.newInputStream(testFile)), - Files.size(testFile), - "application/octet-stream"); - return new TestRequestBody(wrapped, contentLength, crc32(testFile)); - } - case INPUTSTREAM_RESETABLE: { - byte[] content = "Hello world".getBytes(StandardCharsets.UTF_8); - RequestBody wrapped = RequestBody.fromInputStream(new ByteArrayInputStream(content), content.length); - return new TestRequestBody(wrapped, content.length, crc32(content)); - } - case INPUTSTREAM_NOT_RESETABLE: { - byte[] content = "Hello world".getBytes(StandardCharsets.UTF_8); - RequestBody wrapped = RequestBody.fromInputStream(new NonResettableByteStream(content), content.length); - return new TestRequestBody(wrapped, content.length, crc32(content)); - } - default: - throw new RuntimeException("Unsupported body type: " + bodyType); - } - } - - private TestAsyncBody getAsyncRequestBody(BodyType bodyType) throws IOException { - switch (bodyType) { - case STRING: { - String content = "Hello world"; - long contentLength = content.getBytes(StandardCharsets.UTF_8).length; - return new TestAsyncBody(AsyncRequestBody.fromString(content), contentLength, crc32(content)); - } - case FILE: { - long contentLength = Files.size(testFile); - return new TestAsyncBody(AsyncRequestBody.fromFile(testFile), contentLength, crc32(testFile)); - } - case INPUTSTREAM_RESETABLE: { - byte[] content = "Hello world".getBytes(StandardCharsets.UTF_8); - AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromInputStream(new ByteArrayInputStream(content), - (long) content.length, - ASYNC_REQUEST_BODY_EXECUTOR); - return new TestAsyncBody(asyncRequestBody, content.length, crc32(content)); - } - case INPUTSTREAM_NOT_RESETABLE: { - byte[] content = "Hello world".getBytes(StandardCharsets.UTF_8); - AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromInputStream(new NonResettableByteStream(content), - (long) content.length, - ASYNC_REQUEST_BODY_EXECUTOR); - 
return new TestAsyncBody(asyncRequestBody, content.length, crc32(content)); - } - case CONTENT_PROVIDER_NO_LENGTH: { - byte[] content = "Hello world".getBytes(StandardCharsets.UTF_8); - Flowable publisher = Flowable.just(ByteBuffer.wrap(content)); - AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromPublisher(publisher); - return new TestAsyncBody(asyncRequestBody, content.length, crc32(content)); - } - default: - throw new RuntimeException("Unsupported async body type: " + bodyType); - } - } - - private String randomKey() { - return BinaryUtils.toHex(UUID.randomUUID().toString().getBytes()); - } - - private static String getAccountId() { - return sts.getCallerIdentity().account(); - } - - private static String getBucketName() { - return BUCKET_NAME_PREFIX + accountId; - } - - private static String createAccessPoint() { - try { - s3Control.getAccessPoint(r -> r.accountId(accountId).name(AP_NAME)); - } catch (S3ControlException e) { - if (e.awsErrorDetails().sdkHttpResponse().statusCode() != 404) { - throw e; - } - - s3Control.createAccessPoint(r -> r.bucket(bucketName).name(AP_NAME).accountId(accountId)); - } - - return waitForApToBeReady(); - } - - private static String createMrap() throws InterruptedException { - try { - s3Control.getMultiRegionAccessPoint(r -> r.accountId(accountId).name(MRAP_NAME)); - } catch (S3ControlException e) { - if (e.awsErrorDetails().sdkHttpResponse().statusCode() != 404) { - throw e; - } - - CreateMultiRegionAccessPointRequest createMrap = - CreateMultiRegionAccessPointRequest.builder() - .accountId(accountId) - .details(d -> d.name(MRAP_NAME) - .regions(software.amazon.awssdk.services.s3control.model.Region.builder() - .bucket(bucketName) - .build())) - .build(); - - s3Control.createMultiRegionAccessPoint(createMrap); - } - - return waitForMrapToBeReady(); - } - - private static String createBucket() { - String name = getBucketName(); - LOG.debug(() -> "Creating bucket: " + name); - createBucket(name, 3); - 
s3.putBucketAccelerateConfiguration(r -> r.bucket(name) - .accelerateConfiguration(c -> c.status(BucketAccelerateStatus.ENABLED))); - return name; - } - - private static String createEozBucket() { - String eozBucketName = getBucketName() + EOZ_SUFFIX; - LOG.debug(() -> "Creating EOZ bucket: " + eozBucketName); - CreateBucketConfiguration cfg = CreateBucketConfiguration.builder() - .bucket(info -> info.dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) - .type(software.amazon.awssdk.services.s3.model.BucketType.DIRECTORY)) - .location(LocationInfo.builder() - .name("usw2-az3") - .type(LocationType.AVAILABILITY_ZONE) - .build()) - .build(); - - try { - s3.createBucket(r -> r.bucket(eozBucketName).createBucketConfiguration(cfg)); - } catch (S3Exception e) { - AwsErrorDetails awsErrorDetails = e.awsErrorDetails(); - if (!"BucketAlreadyOwnedByYou".equals(awsErrorDetails.errorCode())) { - throw e; - } - } - return eozBucketName; - } - - private static String waitForMrapToBeReady() throws InterruptedException { - GetMultiRegionAccessPointResponse getMrapResponse = null; - - Instant waitStart = Instant.now(); - boolean initial = true; - do { - if (!initial) { - Thread.sleep(Duration.ofSeconds(10).toMillis()); - initial = true; - } - GetMultiRegionAccessPointResponse response = s3Control.getMultiRegionAccessPoint(r -> r.accountId(accountId).name(MRAP_NAME)); - LOG.debug(() -> "Wait response: " + response); - getMrapResponse = response; - } while (MultiRegionAccessPointStatus.READY != getMrapResponse.accessPoint().status() - && Duration.between(Instant.now(), waitStart).compareTo(Duration.ofMinutes(5)) < 0); - - return "arn:aws:s3::" + accountId + ":accesspoint/" + getMrapResponse.accessPoint().alias(); - } - - private static String waitForApToBeReady() { - return s3Control.getAccessPoint(r -> r.accountId(accountId).name(AP_NAME)).accessPointArn(); - } - - private static void createBucket(String bucketName, int retryCount) { - try { - s3.createBucket( - 
CreateBucketRequest.builder() - .bucket(bucketName) - .createBucketConfiguration( - CreateBucketConfiguration.builder() - .locationConstraint(BucketLocationConstraint.US_WEST_2) - .build()) - .build()); - } catch (S3Exception e) { - LOG.debug(() -> "Error attempting to create bucket: " + bucketName); - if (e.awsErrorDetails().errorCode().equals("BucketAlreadyOwnedByYou")) { - LOG.debug(() -> String.format("%s bucket already exists, likely leaked by a previous run%n", bucketName)); - } else if (e.awsErrorDetails().errorCode().equals("TooManyBuckets")) { - LOG.debug(() -> "Printing all buckets for debug:"); - s3.listBuckets().buckets().forEach(l -> LOG.debug(l::toString)); - if (retryCount < 2) { - LOG.debug(() -> "Retrying..."); - createBucket(bucketName, retryCount + 1); - } else { - throw e; - } - } else { - throw e; - } - } - - s3.waiter().waitUntilBucketExists(r -> r.bucket(bucketName)); - } - - private static Path createRandomFile() throws IOException { - Path tmp = Files.createTempFile(null, null); - byte[] randomBytes = new byte[1024]; - new Random().nextBytes(randomBytes); - try (OutputStream os = Files.newOutputStream(tmp)) { - for (int i = 0; i < 16; ++i) { - os.write(randomBytes); - } - } - return tmp; - } - - private static class NonResettableByteStream extends ByteArrayInputStream { - public NonResettableByteStream(byte[] buf) { - super(buf); - } - - @Override - public boolean markSupported() { - return false; - } - - @Override - public synchronized void reset() { - throw new UnsupportedOperationException(); - } - } - - private static String crc32(String s) { - return crc32(s.getBytes(StandardCharsets.UTF_8)); - } - - private static String crc32(byte[] bytes) { - CRC32.reset(); - CRC32.update(bytes); - return BinaryUtils.toBase64(CRC32.getChecksumBytes()); - } - - private static String crc32(Path p) throws IOException { - CRC32.reset(); - - byte[] buff = new byte[4096]; - int read; - try (InputStream is = Files.newInputStream(p)) { - while (true) { - 
read = is.read(buff); - if (read == -1) { - break; - } - CRC32.update(buff, 0, read); - } - } - - return BinaryUtils.toBase64(CRC32.getChecksumBytes()); - } - - private void recordObjectToCleanup(BucketType type, String key) { - bucketCleanup.computeIfAbsent(type, k -> new ArrayList<>()).add(key); - } - - private static class RequestRecorder implements ExecutionInterceptor { - private final List requests = new ArrayList<>(); - @Override - public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { - requests.add(context.httpRequest()); - } - - public List getRequests() { - return requests; - } - } - - private static class EnablePayloadSigningInterceptor implements ExecutionInterceptor { - @Override - public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { - executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); - ExecutionInterceptor.super.beforeExecution(context, executionAttributes); - } - } -} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/BaseS3RegressionTest.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/BaseS3RegressionTest.java new file mode 100644 index 000000000000..85570488a7da --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/BaseS3RegressionTest.java @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.awssdk.services.s3.regression;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3control.S3ControlClient;
import software.amazon.awssdk.services.sts.StsClient;
import software.amazon.awssdk.utils.Logger;

/**
 * Base class for the S3 regression tests.
 *
 * <p>Once per JVM ({@code @BeforeAll}) this creates, or reuses if already present, the shared
 * test resources: a standard bucket, a multi-region access point (MRAP), an S3 Express One Zone
 * directory bucket and a regular access point. After every test method it deletes any objects
 * the test recorded via {@link #recordObjectToCleanup(BucketType, String)}.
 */
public abstract class BaseS3RegressionTest {
    private static final Logger LOG = Logger.loggerFor(BaseS3RegressionTest.class);

    private static final String BUCKET_NAME_PREFIX = "do-not-delete-checksums-";
    private static final String MRAP_NAME = "do-not-delete-checksum-testing";
    private static final String AP_NAME = "do-not-delete-checksum-testing-ap";
    // Suffix required for S3 Express One Zone directory bucket names in usw2-az3.
    private static final String EOZ_SUFFIX = "--usw2-az3--x-s3";
    protected static final Region REGION = Region.US_WEST_2;

    protected static final String TEST_CREDENTIALS_PROFILE_NAME = "aws-test-account";
    protected static final AwsCredentialsProviderChain CREDENTIALS_PROVIDER_CHAIN =
        AwsCredentialsProviderChain.of(ProfileCredentialsProvider.builder()
                                                                 .profileName(TEST_CREDENTIALS_PROFILE_NAME)
                                                                 .build(),
                                       DefaultCredentialsProvider.create());

    protected static String accountId;
    protected static String bucketName;
    protected static String mrapArn;
    protected static String eozBucket;
    protected static String apArn;

    protected static S3ControlClient s3Control;
    protected static S3Client s3;
    protected static StsClient sts;

    // Keys created by the current test, grouped by the bucket type they were written to,
    // so methodCleanup() can delete each one from the right bucket/ARN.
    // NOTE(review): the generic parameters were lost in transcription; restored as
    // Map<BucketType, List<String>> based on recordObjectToCleanup() below — confirm against VCS.
    private Map<BucketType, List<String>> bucketCleanup = new HashMap<>();

    @BeforeAll
    static void setup() throws InterruptedException, IOException {
        s3 = S3Client.builder()
                     .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN)
                     .region(REGION)
                     .build();

        s3Control = S3ControlClient.builder()
                                   .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN)
                                   .region(REGION)
                                   .build();

        sts = StsClient.builder()
                       .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN)
                       .region(REGION)
                       .build();

        accountId = S3ChecksumsTestUtils.getAccountId(sts);
        bucketName = S3ChecksumsTestUtils.createBucket(s3, getBucketName(), LOG);
        mrapArn = S3ChecksumsTestUtils.createMrap(s3Control, accountId, MRAP_NAME, bucketName, LOG);
        eozBucket = S3ChecksumsTestUtils.createEozBucket(s3, getBucketName() + EOZ_SUFFIX, LOG);
        apArn = S3ChecksumsTestUtils.createAccessPoint(s3Control, accountId, AP_NAME, bucketName);

        LOG.info(() -> "Using bucket: " + bucketName);
    }

    /**
     * Deletes every object recorded by the test that just ran. Deletion is best-effort:
     * a failure on one key is logged and the remaining keys are still attempted.
     */
    @AfterEach
    public void methodCleanup() {
        bucketCleanup.forEach((bt, keys) -> {
            String bucket = bucketForType(bt);
            keys.forEach(k -> {
                try {
                    s3.deleteObject(r -> r.bucket(bucket).key(k));
                } catch (Exception e) {
                    LOG.error(() -> String.format("Error in cleaning for bucket %s, key: %s: %s", bucket, k, e.getMessage()));
                }
            });
        });

        bucketCleanup.clear();
    }

    /** Registers an object for deletion in {@link #methodCleanup()} after the current test. */
    protected void recordObjectToCleanup(BucketType type, String key) {
        bucketCleanup.computeIfAbsent(type, k -> new ArrayList<>()).add(key);
    }

    /** Name of the shared standard bucket; requires {@link #accountId} to be initialized. */
    protected static String getBucketName() {
        return BUCKET_NAME_PREFIX + accountId;
    }

    /** Resolves the bucket name or ARN used to address the given bucket type in requests. */
    protected static String bucketForType(BucketType type) {
        switch (type) {
            case STANDARD_BUCKET:
                return bucketName;
            case MRAP:
                return mrapArn;
            case EOZ:
                return eozBucket;
            case ACCESS_POINT:
                return apArn;
            default:
                throw new RuntimeException("Unknown bucket type: " + type);
        }
    }
}
b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/BucketType.java new file mode 100644 index 000000000000..e7f2ebe36276 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/BucketType.java @@ -0,0 +1,36 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression; + +public enum BucketType { + STANDARD_BUCKET(false), + ACCESS_POINT(true), + // Multi-region access point + MRAP(true), + // Express one zone/S3 express + EOZ(false), + ; + + private final boolean arnType; + + BucketType(boolean arnType) { + this.arnType = arnType; + } + + public boolean isArnType() { + return arnType; + } +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/ControlPlaneOperationRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/ControlPlaneOperationRegressionTesting.java new file mode 100644 index 000000000000..64fad69264ec --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/ControlPlaneOperationRegressionTesting.java @@ -0,0 +1,180 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression; + +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.makeAsyncClient; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.makeSyncClient; + +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.awscore.AwsClient; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.Delete; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.GlacierJobParameters; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; +import software.amazon.awssdk.services.s3.model.RestoreRequest; +import software.amazon.awssdk.services.s3.model.StorageClass; +import software.amazon.awssdk.services.s3.model.Tier; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.Logger; + +public class ControlPlaneOperationRegressionTesting extends BaseS3RegressionTest { + private static final Logger 
LOG = Logger.loggerFor(ControlPlaneOperationRegressionTesting.class); + + // Request checksum required + @ParameterizedTest + @MethodSource("testConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void deleteObject(TestConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config); + + LOG.info(() -> "Running deleteObject with config: " + config.toString()); + + String bucket = bucketForType(config.getBucketType()); + String key = putRandomObject(config.getBucketType()); + TestCallable callable = null; + try { + DeleteObjectsRequest req = DeleteObjectsRequest.builder() + .bucket(bucket) + .delete(Delete.builder() + .objects(ObjectIdentifier.builder() + .key(key) + .build()) + .build()) + .build(); + + callable = callDeleteObjects(req, config); + callable.runnable().call(); + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + + // Request checksum optional + @ParameterizedTest + @MethodSource("testConfigs") + void restoreObject(TestConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config); + + Assumptions.assumeFalse(config.getBucketType() == BucketType.EOZ, + "Restore is not supported for S3 Express"); + + LOG.info(() -> "Running restoreObject with config: " + config); + + String bucket = bucketForType(config.getBucketType()); + String key = putRandomArchivedObject(config.getBucketType()); + TestCallable callable = null; + try { + RestoreObjectRequest request = RestoreObjectRequest.builder() + .bucket(bucket) + .key(key) + .restoreRequest(RestoreRequest.builder() + .days(5) + .glacierJobParameters(GlacierJobParameters.builder() + .tier(Tier.STANDARD) + .build()) + .build()) + .build(); + + callable = callRestoreObject(request, config); + callable.runnable().call(); + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + + private TestCallable callDeleteObjects(DeleteObjectsRequest request, TestConfig config) { + AwsClient toClose; + Callable runnable = null; + + if 
(config.getFlavor().isAsync()) { + S3AsyncClient s3Async = makeAsyncClient(config, REGION, CREDENTIALS_PROVIDER_CHAIN); + toClose = s3Async; + runnable = () -> { + CompletableFutureUtils.joinLikeSync(s3Async.deleteObjects(request)); + return null; + }; + } else { + S3Client s3 = makeSyncClient(config, REGION, CREDENTIALS_PROVIDER_CHAIN); + toClose = s3; + runnable = () -> { + s3.deleteObjects(request); + return null; + }; + } + + return new TestCallable<>(toClose, runnable); + } + + private TestCallable callRestoreObject(RestoreObjectRequest request, TestConfig config) { + AwsClient toClose; + Callable callable = null; + + if (config.getFlavor().isAsync()) { + S3AsyncClient s3Async = makeAsyncClient(config, REGION, CREDENTIALS_PROVIDER_CHAIN); + toClose = s3Async; + callable = () -> { + s3Async.restoreObject(request).join(); + return null; + }; + } else { + S3Client s3 = makeSyncClient(config, REGION, CREDENTIALS_PROVIDER_CHAIN); + toClose = s3; + callable = () -> { + s3.restoreObject(request); + return null; + }; + } + + return new TestCallable<>(toClose, callable); + } + + static List testConfigs() { + return TestConfig.testConfigs(); + } + + private String putRandomObject(BucketType bucketType) { + String key = S3ChecksumsTestUtils.randomKey(); + String bucketName = bucketForType(bucketType); + s3.putObject(r -> r.bucket(bucketName).key(key), RequestBody.fromString("hello")); + recordObjectToCleanup(bucketType, key); + return key; + } + + private String putRandomArchivedObject(BucketType bucketType) { + String key = S3ChecksumsTestUtils.randomKey(); + String bucketName = bucketForType(bucketType); + s3.putObject(r -> r.bucket(bucketName).key(key).storageClass(StorageClass.GLACIER), RequestBody.fromString("hello")); + recordObjectToCleanup(bucketType, key); + return key; + } + + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/DownloadStreamingRegressionTesting.java 
b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/DownloadStreamingRegressionTesting.java new file mode 100644 index 000000000000..0c1ae81b048b --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/DownloadStreamingRegressionTesting.java @@ -0,0 +1,604 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.regression.ControlPlaneOperationRegressionTesting.testConfigs; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.crc32; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.ResponseBytes; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; +import software.amazon.awssdk.core.async.ResponsePublisher; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.core.sync.ResponseTransformer; +import software.amazon.awssdk.http.AbortableInputStream; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ChecksumMode; +import software.amazon.awssdk.services.s3.model.CompletedPart; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import software.amazon.awssdk.testutils.InputStreamUtils; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.StringUtils; +import software.amazon.awssdk.utils.ToString; + +public class DownloadStreamingRegressionTesting extends BaseS3RegressionTest { + private static final Logger LOG = Logger.loggerFor(DownloadStreamingRegressionTesting.class); + + static ObjectWithCRC smallObject; + static ObjectWithCRC largeObject; + static ObjectWithCRC largeObjectMulti; + + private static Path tempDirPath; + + private List pathsToDelete; + + @BeforeAll + static void init() throws Exception { + tempDirPath = createTempDir("DownloadStreamingIntegrationTesting"); + smallObject = uploadObjectSmall(); // 16 KiB + 
largeObject = uploadObjectLarge(); // 60 MiB + largeObjectMulti = uploadMultiPartObject(); // 60 MiB, default multipart config + } + + @AfterAll + static void cleanup() { + for (BucketType bucketType : BucketType.values()) { + String bucket = bucketForType(bucketType); + s3.deleteObject(req -> req.bucket(bucket).key(smallObject.key())); + s3.deleteObject(req -> req.bucket(bucket).key(largeObject.key())); + s3.deleteObject(req -> req.bucket(bucket).key(largeObjectMulti.key())); + } + } + + @BeforeEach + void setupMethod() { + pathsToDelete = new ArrayList<>(); + } + + @AfterEach + void testCleanup() { + pathsToDelete.forEach(p -> { + try { + Files.delete(p); + } catch (Exception e) { + LOG.info(() -> String.format("Unable to delete file %s", p.toString()), e); + } + }); + } + + @ParameterizedTest + @MethodSource("downloadConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void downloadObject(DownloadConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config.baseConfig()); + + LOG.info(() -> "Running downloadObject with config: " + config); + + String key = config.contentSize().s3Object().key(); + GetObjectRequest.Builder b = GetObjectRequest.builder() + .bucket(bucketForType(config.baseConfig().getBucketType())) + .key(key); + if (config.checksumModeEnabled()) { + b.checksumMode(ChecksumMode.ENABLED); + } + + GetObjectRequest request = b.build(); + + CallResponse response; + switch (config.baseConfig().getFlavor()) { + case STANDARD_SYNC: { + response = callSyncGetObject(config, request); + break; + } + case STANDARD_ASYNC: + case MULTIPART_ENABLED: + case CRT_BASED: { + response = callAsyncGetObject(request, config); + break; + } + default: + throw new RuntimeException("Unsupported java client flavor: " + config.baseConfig().getFlavor()); + } + + String receivedContentCRC32 = crc32(response.content()); + String s3Crc32 = response.crc32(); + if (config.checksumModeEnabled() && StringUtils.isNotBlank(s3Crc32)) { + 
assertThat(receivedContentCRC32) + .withFailMessage("Mismatch with s3 crc32 for config " + config) + .isEqualTo(s3Crc32); + } + String expectedCRC32 = config.contentSize().s3Object().crc32(); + assertThat(receivedContentCRC32) + .withFailMessage("Mismatch with calculated crc32 for config " + config) + .isEqualTo(expectedCRC32); + } + + // 16 KiB + static ObjectWithCRC uploadObjectSmall() throws IOException { + String name = String.format("%s-%s.dat", System.currentTimeMillis(), UUID.randomUUID()); + LOG.info(() -> "test setup - uploading small test object: " + name); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + byte[] rand = new byte[1024]; + for (int i = 0; i < 16; i++) { + new Random().nextBytes(rand); + os.write(rand); + } + byte[] fullContent = os.toByteArray(); + String crc32 = crc32(fullContent); + for (BucketType bucketType : BucketType.values()) { + String bucket = bucketForType(bucketType); + PutObjectRequest req = PutObjectRequest + .builder() + .bucket(bucket) + .key(name) + .build(); + s3.putObject(req, RequestBody.fromBytes(fullContent)); + } + return new ObjectWithCRC(name, crc32); + } + + // 60 MiB + static ObjectWithCRC uploadObjectLarge() throws IOException { + String name = String.format("%s-%s.dat", System.currentTimeMillis(), UUID.randomUUID()); + LOG.info(() -> "test setup - uploading large test object: " + name); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + byte[] rand = new byte[1024 * 1024]; + for (int i = 0; i < 60; i++) { + new Random().nextBytes(rand); + os.write(rand); + } + byte[] fullContent = os.toByteArray(); + String crc32 = crc32(fullContent); + for (BucketType bucketType : BucketType.values()) { + String bucket = bucketForType(bucketType); + PutObjectRequest req = PutObjectRequest + .builder() + .bucket(bucket) + .key(name) + .build(); + + s3.putObject(req, RequestBody.fromBytes(fullContent)); + } + return new ObjectWithCRC(name, crc32); + } + + // 60MiB, multipart default config + static 
ObjectWithCRC uploadMultiPartObject() throws Exception { + String name = String.format("%s-%s.dat", System.currentTimeMillis(), UUID.randomUUID()); + LOG.info(() -> "test setup - uploading large test object - multipart: " + name); + ByteArrayOutputStream os = new ByteArrayOutputStream(); + byte[] rand = new byte[6 * 1024 * 1024]; + for (int i = 0; i < 10; i++) { + new Random().nextBytes(rand); + os.write(rand); + } + byte[] fullContent = os.toByteArray(); + String crc32 = crc32(fullContent); + for (BucketType bucketType : BucketType.values()) { + doMultipartUpload(bucketType, name, fullContent); + } + return new ObjectWithCRC(name, crc32); + } + + static void doMultipartUpload(BucketType bucketType, String objectName, byte[] content) { + String bucket = bucketForType(bucketType); + LOG.info(() -> String.format("Uploading multipart object for bucket type: %s - %s", bucketType, bucket)); + CreateMultipartUploadRequest createMulti = CreateMultipartUploadRequest.builder() + .bucket(bucket) + .key(objectName) + .build(); + + CreateMultipartUploadResponse res = s3.createMultipartUpload(createMulti); + String uploadId = res.uploadId(); + + List completedParts = new ArrayList<>(); + int partAmount = 10; + int partSize = 6 * 1024 * 1024; + for (int i = 0; i < partAmount; i++) { + final int partNumber = i + 1; + int startIndex = partSize * i; + int endIndex = startIndex + partSize; + byte[] partContent = Arrays.copyOfRange(content, startIndex, endIndex); + LOG.info(() -> "Uploading part: " + partNumber); + UploadPartResponse partResponse = s3.uploadPart(req -> req.partNumber(partNumber) + .uploadId(uploadId) + .key(objectName) + .bucket(bucket), + RequestBody.fromBytes(partContent)); + completedParts.add(CompletedPart.builder() + .eTag(partResponse.eTag()) + .partNumber(partNumber) + .build()); + LOG.info(() -> String.format("done part %s - etag: %s: ", partNumber, partResponse.eTag())); + } + + LOG.info(() -> "Finishing MPU, completed parts: " + completedParts); + + 
s3.completeMultipartUpload(req -> req.multipartUpload(u -> u.parts(completedParts)) + .bucket(bucket) + .key(objectName) + .uploadId(uploadId)); + s3.waiter().waitUntilObjectExists(r -> r.bucket(bucket).key(objectName), + c -> c.waitTimeout(Duration.ofMinutes(5))); + } + + private static List downloadConfigs() { + List configs = new ArrayList<>(); + for (ResponseTransformerType responseTransformerType : ResponseTransformerType.values()) { + for (TestConfig baseConfig : testConfigs()) { + for (ContentSize contentSize : ContentSize.values()) { + DownloadConfig checksumEnabled = + new DownloadConfig(baseConfig, responseTransformerType, contentSize, true); + DownloadConfig checksumDisabled = + new DownloadConfig(baseConfig, responseTransformerType, contentSize, false); + configs.add(checksumEnabled); + configs.add(checksumDisabled); + } + } + } + return configs; + } + + CallResponse callSyncGetObject(DownloadConfig config, GetObjectRequest request) throws IOException { + S3Client s3Client = makeSyncClient(config.baseConfig()); + + byte[] content; + String s3Crc32 = null; + switch (config.responseTransformerType()) { + case FILE: { + String filename = request.key(); + Path filePath = Paths.get(tempDirPath.toString(), filename); + pathsToDelete.add(filePath); + GetObjectResponse res = s3Client.getObject(request, ResponseTransformer.toFile(filePath)); + s3Crc32 = res.checksumCRC32(); + content = Files.readAllBytes(filePath); + break; + } + + case BYTES: { + ResponseBytes res = s3Client.getObject(request, ResponseTransformer.toBytes()); + content = res.asByteArray(); + s3Crc32 = res.response().checksumCRC32(); + break; + } + + case INPUT_STREAM: { + ResponseInputStream res = s3Client.getObject(request, ResponseTransformer.toInputStream()); + content = InputStreamUtils.drainInputStream(res); + s3Crc32 = res.response().checksumCRC32(); + break; + } + + case OUTPUT_STREAM: { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + GetObjectResponse res = 
s3Client.getObject(request, ResponseTransformer.toOutputStream(os)); + content = os.toByteArray(); + s3Crc32 = res.checksumCRC32(); + break; + } + + case UNMANAGED: { + UnmanagedResponseTransformer tr = new UnmanagedResponseTransformer(); + s3Client.getObject(request, ResponseTransformer.unmanaged(tr)); + content = tr.content; + s3Crc32 = tr.response().checksumCRC32(); + break; + } + + case PUBLISHER: + Assumptions.abort("Skipping 'publisher' transformer type for sync client: " + config); + content = null; + break; + + default: + throw new UnsupportedOperationException("unsupported response transformer type: " + config.responseTransformerType()); + + } + s3Client.close(); + return new CallResponse(content, s3Crc32); + } + + CallResponse callAsyncGetObject(GetObjectRequest request, DownloadConfig config) throws Exception { + S3AsyncClient s3AsyncClient = makeAsyncClient(config.baseConfig()); + + byte[] content; + String s3crc32 = null; + switch (config.responseTransformerType()) { + case FILE: { + String filename = randomFileName(); + Path filePath = Paths.get(tempDirPath.toString(), filename); + pathsToDelete.add(filePath); + GetObjectResponse res = s3AsyncClient.getObject(request, AsyncResponseTransformer.toFile(filePath)) + .get(5, TimeUnit.MINUTES); + content = Files.readAllBytes(filePath); + s3crc32 = res.checksumCRC32(); + break; + } + + case BYTES: { + ResponseBytes res = s3AsyncClient.getObject(request, AsyncResponseTransformer.toBytes()) + .get(5, TimeUnit.MINUTES); + content = res.asByteArray(); + s3crc32 = res.response().checksumCRC32(); + break; + } + + case INPUT_STREAM: { + ResponseInputStream res = s3AsyncClient.getObject(request, + AsyncResponseTransformer.toBlockingInputStream()) + .get(5, TimeUnit.MINUTES); + content = InputStreamUtils.drainInputStream(res); + s3crc32 = res.response().checksumCRC32(); + break; + } + + case PUBLISHER: { + ResponsePublisher res = s3AsyncClient.getObject(request, + AsyncResponseTransformer.toPublisher()) + .get(5, 
TimeUnit.MINUTES); + ContentConsumer consumer = new ContentConsumer(); + CompletableFuture fut = res.subscribe(consumer); + fut.get(5, TimeUnit.MINUTES); + content = consumer.getFullContent(); + s3crc32 = res.response().checksumCRC32(); + break; + } + + case OUTPUT_STREAM: + case UNMANAGED: + Assumptions.abort(String.format("Skipping '%s' transformer type for async client: %s", + config.responseTransformerType(), config)); + content = null; + break; + default: + throw new UnsupportedOperationException("unsupported response transformer type: " + config.responseTransformerType()); + } + s3AsyncClient.close(); + return new CallResponse(content, s3crc32); + } + + private static class CallResponse { + byte[] content; + String crc32; + + public CallResponse(byte[] content, String crc32) { + this.content = content; + this.crc32 = crc32; + } + + public byte[] content() { + return content; + } + + public String crc32() { + return crc32; + } + } + + enum ResponseTransformerType { + FILE, + BYTES, + INPUT_STREAM, + OUTPUT_STREAM, + UNMANAGED, + PUBLISHER + } + + private String randomFileName() { + return String.format("%s-%S", System.currentTimeMillis(), UUID.randomUUID()); + } + + static class DownloadConfig { + private TestConfig baseConfig; + private ResponseTransformerType responseTransformerType; + private ContentSize contentSize; + private boolean checksumModeEnabled; + + public DownloadConfig(TestConfig baseConfig, ResponseTransformerType responseTransformerType, + ContentSize contentSize, boolean checksumModeEnabled) { + this.baseConfig = baseConfig; + this.responseTransformerType = responseTransformerType; + this.contentSize = contentSize; + this.checksumModeEnabled = checksumModeEnabled; + } + + public TestConfig baseConfig() { + return this.baseConfig; + } + + public ResponseTransformerType responseTransformerType() { + return responseTransformerType; + } + + public ContentSize contentSize() { + return contentSize; + } + + private boolean checksumModeEnabled() { + 
return this.checksumModeEnabled; + } + + @Override + public String toString() { + return ToString.builder("DownloadConfig") + .add("baseConfig", baseConfig) + .add("responseTransformerType", responseTransformerType) + .add("contentSize", contentSize) + .add("checksumModeEnabled", checksumModeEnabled) + .build(); + } + } + + private static Path createTempDir(String path) { + try { + return Files.createDirectories(Paths.get(path)); + } catch (Exception e) { + LOG.info(() -> "Unable to create directory", e); + throw new RuntimeException(e); + } + } + + private S3Client makeSyncClient(TestConfig config) { + switch (config.getFlavor()) { + case STANDARD_SYNC: + return S3Client.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + default: + throw new RuntimeException("Unsupported sync flavor: " + config.getFlavor()); + } + } + + private S3AsyncClient makeAsyncClient(TestConfig config) { + switch (config.getFlavor()) { + case STANDARD_ASYNC: + return S3AsyncClient.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + case MULTIPART_ENABLED: + return S3AsyncClient.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .multipartEnabled(true) + .build(); + case CRT_BASED: { + return S3AsyncClient.crtBuilder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(REGION) + .credentialsProvider(CREDENTIALS_PROVIDER_CHAIN) + .build(); + } + default: + throw new RuntimeException("Unsupported async flavor: " + config.getFlavor()); + } + } + + enum ContentSize { + SMALL, + 
LARGE, + LARGE_MULTI; + + ObjectWithCRC s3Object() { + switch (this) { + case SMALL: + return smallObject; + case LARGE: + return largeObject; + case LARGE_MULTI: + return largeObjectMulti; + default: + throw new IllegalArgumentException("Unknown ContentSize " + this); + } + } + } + + private static class ObjectWithCRC { + private String key; + private String crc32; + + public ObjectWithCRC(String key, String crc32) { + this.key = key; + this.crc32 = crc32; + } + + public String key() { + return key; + } + + public String crc32() { + return crc32; + } + } + + private static class UnmanagedResponseTransformer implements ResponseTransformer { + byte[] content; + GetObjectResponse response; + + @Override + public byte[] transform(GetObjectResponse response, AbortableInputStream inputStream) throws Exception { + this.response = response; + this.content = InputStreamUtils.drainInputStream(inputStream); // stream will be closed + return content; + } + + public GetObjectResponse response() { + return this.response; + } + } + + private static class ContentConsumer implements Consumer { + private List buffs = new ArrayList<>(); + + @Override + public void accept(ByteBuffer byteBuffer) { + buffs.add(byteBuffer); + } + + byte[] getFullContent() { + int totalSize = buffs.stream() + .mapToInt(ByteBuffer::remaining) + .sum(); + byte[] result = new byte[totalSize]; + int offset = 0; + for (ByteBuffer buff : buffs) { + int length = buff.remaining(); + buff.get(result, offset, length); + offset += length; + } + return result; + } + } +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ChecksumsTestUtils.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ChecksumsTestUtils.java new file mode 100644 index 000000000000..a6314ea5d556 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ChecksumsTestUtils.java @@ -0,0 +1,368 @@ +/* + * Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.time.Instant; +import java.util.Random; +import java.util.UUID; +import org.junit.jupiter.api.Assumptions; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.awscore.exception.AwsErrorDetails; +import software.amazon.awssdk.checksums.DefaultChecksumAlgorithm; +import software.amazon.awssdk.checksums.SdkChecksum; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.SdkHttpClient; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.BucketLocationConstraint; +import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; +import software.amazon.awssdk.services.s3.model.CreateBucketRequest; +import software.amazon.awssdk.services.s3.model.DataRedundancy; +import software.amazon.awssdk.services.s3.model.LocationInfo; +import software.amazon.awssdk.services.s3.model.LocationType; +import 
software.amazon.awssdk.services.s3.model.S3Exception; +import software.amazon.awssdk.services.s3.regression.upload.UploadConfig; +import software.amazon.awssdk.services.s3control.S3ControlClient; +import software.amazon.awssdk.services.s3control.model.CreateMultiRegionAccessPointRequest; +import software.amazon.awssdk.services.s3control.model.GetMultiRegionAccessPointResponse; +import software.amazon.awssdk.services.s3control.model.MultiRegionAccessPointStatus; +import software.amazon.awssdk.services.s3control.model.S3ControlException; +import software.amazon.awssdk.services.sts.StsClient; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.utils.BinaryUtils; +import software.amazon.awssdk.utils.Logger; + +public final class S3ChecksumsTestUtils { + + private static final SdkChecksum CRC32 = SdkChecksum.forAlgorithm(DefaultChecksumAlgorithm.CRC32); + + private S3ChecksumsTestUtils() { + } + + public static String createBucket(S3Client s3, String name, Logger log) { + log.info(() -> "Creating bucket: " + name); + createBucket(s3, name, 3, log); + return name; + } + + public static void createBucket(S3Client s3, String bucketName, int retryCount, Logger log) { + try { + s3.createBucket( + CreateBucketRequest.builder() + .bucket(bucketName) + .createBucketConfiguration( + CreateBucketConfiguration.builder() + .locationConstraint(BucketLocationConstraint.US_WEST_2) + .build()) + .build()); + } catch (S3Exception e) { + log.info(() -> "Error attempting to create bucket: " + bucketName); + if ("BucketAlreadyOwnedByYou".equals(e.awsErrorDetails().errorCode())) { + log.info(() -> String.format("%s bucket already exists, likely leaked by a previous run%n", bucketName)); + } else if ("TooManyBuckets".equals(e.awsErrorDetails().errorCode())) { + log.info(() -> "Printing all buckets for debug:"); + s3.listBuckets().buckets().forEach(l -> log.info(l::toString)); + if (retryCount < 2) { + log.info(() -> "Retrying..."); + createBucket(s3, 
bucketName, retryCount + 1, log); + } else { + throw e; + } + } else if ("OperationAborted".equals(e.awsErrorDetails().errorCode())) { + log.warn(() -> e.awsErrorDetails().errorMessage() + " --- Likely another operation is creating the bucket, " + + "just wait for the bucket to be available"); + } else { + throw e; + } + } + + log.info(() -> String.format("waiting for bucket '%s' to be created and available", bucketName)); + s3.waiter().waitUntilBucketExists(r -> r.bucket(bucketName)); + } + + public static String createEozBucket(S3Client s3, String bucketName, Logger log) { + String eozBucketName = bucketName; + log.info(() -> "Creating EOZ bucket: " + eozBucketName); + CreateBucketConfiguration cfg = + CreateBucketConfiguration.builder() + .bucket(info -> info.dataRedundancy(DataRedundancy.SINGLE_AVAILABILITY_ZONE) + .type(software.amazon.awssdk.services.s3.model.BucketType.DIRECTORY)) + .location(LocationInfo.builder() + .name("usw2-az3") + .type(LocationType.AVAILABILITY_ZONE) + .build()) + .build(); + + try { + s3.createBucket(r -> r.bucket(eozBucketName).createBucketConfiguration(cfg)); + } catch (S3Exception e) { + AwsErrorDetails awsErrorDetails = e.awsErrorDetails(); + if (!"BucketAlreadyOwnedByYou".equals(awsErrorDetails.errorCode())) { + throw e; + } + } + return eozBucketName; + } + + public static String createMrap(S3ControlClient s3Control, String accountId, String mrapName, String bucketName, Logger log) + throws InterruptedException { + try { + s3Control.getMultiRegionAccessPoint(r -> r.accountId(accountId).name(mrapName)); + } catch (S3ControlException e) { + if (e.awsErrorDetails().sdkHttpResponse().statusCode() != 404) { + throw e; + } + + CreateMultiRegionAccessPointRequest createMrap = + CreateMultiRegionAccessPointRequest.builder() + .accountId(accountId) + .details(d -> d.name(mrapName) + .regions(software.amazon.awssdk.services.s3control.model.Region.builder() + .bucket(bucketName) + .build())) + .build(); + + 
s3Control.createMultiRegionAccessPoint(createMrap); + } + + return waitForMrapToBeReady(s3Control, accountId, mrapName, log); + } + + private static String waitForMrapToBeReady(S3ControlClient s3Control, String accountId, String mrapName, Logger log) + throws InterruptedException { + GetMultiRegionAccessPointResponse getMrapResponse = null; + + Instant waitStart = Instant.now(); + boolean initial = true; + do { + if (!initial) { + Thread.sleep(Duration.ofSeconds(10).toMillis()); + initial = true; + } + GetMultiRegionAccessPointResponse response = + s3Control.getMultiRegionAccessPoint(r -> r.accountId(accountId).name(mrapName)); + log.info(() -> "Wait response: " + response); + getMrapResponse = response; + } while (MultiRegionAccessPointStatus.READY != getMrapResponse.accessPoint().status() + && Duration.between(Instant.now(), waitStart).compareTo(Duration.ofMinutes(5)) < 0); + + return "arn:aws:s3::" + accountId + ":accesspoint/" + getMrapResponse.accessPoint().alias(); + } + + public static String getAccountId(StsClient sts) { + return sts.getCallerIdentity().account(); + } + + public static String createAccessPoint(S3ControlClient s3Control, String accountId, String apName, String bucketName) { + try { + s3Control.getAccessPoint(r -> r.accountId(accountId).name(apName)); + } catch (S3ControlException e) { + if (e.awsErrorDetails().sdkHttpResponse().statusCode() != 404) { + throw e; + } + + s3Control.createAccessPoint(r -> r.bucket(bucketName).name(apName).accountId(accountId)); + } + + // wait for AP to be ready + return s3Control.getAccessPoint(r -> r.accountId(accountId).name(apName)).accessPointArn(); + } + + + public static void assumeNotAccessPointWithPathStyle(TestConfig config) { + BucketType bucketType = config.getBucketType(); + Assumptions.assumeFalse(config.isForcePathStyle() && bucketType.isArnType(), + "Path style doesn't work with ARN type buckets"); + } + + public static void assumeNotAccessPointWithPathStyle(UploadConfig config) { + BucketType 
bucketType = config.getBucketType(); + Assumptions.assumeFalse(config.isForcePathStyle() && bucketType.isArnType(), + "Path style doesn't work with ARN type buckets"); + } + + public static String crc32(String s) { + return crc32(s.getBytes(StandardCharsets.UTF_8)); + } + + public static String crc32(byte[] bytes) { + CRC32.reset(); + CRC32.update(bytes); + return BinaryUtils.toBase64(CRC32.getChecksumBytes()); + } + + public static String crc32(Path p) throws IOException { + CRC32.reset(); + + byte[] buff = new byte[4096]; + int read; + try (InputStream is = Files.newInputStream(p)) { + while (true) { + read = is.read(buff); + if (read == -1) { + break; + } + CRC32.update(buff, 0, read); + } + } + + return BinaryUtils.toBase64(CRC32.getChecksumBytes()); + } + + public static S3Client makeSyncClient(TestConfig config, Region region, AwsCredentialsProvider provider) { + switch (config.getFlavor()) { + case STANDARD_SYNC: + return S3Client.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + default: + throw new RuntimeException("Unsupported sync flavor: " + config.getFlavor()); + } + } + + public static S3AsyncClient makeAsyncClient(TestConfig config, Region region, AwsCredentialsProvider provider) { + switch (config.getFlavor()) { + case STANDARD_ASYNC: + return S3AsyncClient.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + case MULTIPART_ENABLED: + return S3AsyncClient.builder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .multipartEnabled(true) + .build(); + case CRT_BASED: { + return S3AsyncClient.crtBuilder() + .forcePathStyle(config.isForcePathStyle()) + 
.requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + } + default: + throw new RuntimeException("Unsupported async flavor: " + config.getFlavor()); + } + } + + public static S3Client makeSyncClient(UploadConfig config, ClientOverrideConfiguration overrideConfiguration, + Region region, AwsCredentialsProvider provider) { + return S3Client.builder() + .overrideConfiguration(overrideConfiguration) + .httpClient(makeHttpClient()) + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + } + + private static SdkHttpClient makeHttpClient() { + return ApacheHttpClient.builder() + .maxConnections(10_000) + .connectionAcquisitionTimeout(Duration.ofMinutes(10)) + .build(); + } + + public static S3AsyncClient makeAsyncClient(UploadConfig config, + S3ClientFlavor flavor, + ClientOverrideConfiguration overrideConfiguration, + Region region, AwsCredentialsProvider provider) { + switch (flavor) { + case STANDARD_ASYNC: + return S3AsyncClient.builder() + .overrideConfiguration(overrideConfiguration) + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + case MULTIPART_ENABLED: + return S3AsyncClient.builder() + .overrideConfiguration(overrideConfiguration) + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .multipartEnabled(true) + .build(); + case CRT_BASED: { + return S3AsyncClient.crtBuilder() + .forcePathStyle(config.isForcePathStyle()) + .requestChecksumCalculation(config.getRequestChecksumValidation()) + .region(region) + .credentialsProvider(provider) + .build(); + } + default: + throw new RuntimeException("Unsupported async flavor: " + 
flavor); + } + } + + public static S3TransferManager makeTm(UploadConfig config, + S3ClientFlavor flavor, + ClientOverrideConfiguration overrideConfiguration, + Region region, AwsCredentialsProvider provider) { + S3AsyncClient s3AsyncClient = makeAsyncClient(config, flavor, overrideConfiguration, region, provider); + return S3TransferManager.builder().s3Client(s3AsyncClient).build(); + } + + public static Path createRandomFile16KB() throws IOException { + Path tmp = Files.createTempFile(null, null); + byte[] randomBytes = new byte[1024]; + new Random().nextBytes(randomBytes); + try (OutputStream os = Files.newOutputStream(tmp)) { + for (int i = 0; i < 16; ++i) { + os.write(randomBytes); + } + } + return tmp; + } + + public static Path createRandomFile60MB() throws IOException { + Path tmp = Files.createTempFile(null, null); + byte[] randomBytes = new byte[1024 * 1024]; + new Random().nextBytes(randomBytes); + try (OutputStream os = Files.newOutputStream(tmp)) { + for (int i = 0; i < 60; ++i) { + os.write(randomBytes); + } + } + return tmp; + } + + public static String randomKey() { + return BinaryUtils.toHex(UUID.randomUUID().toString().getBytes()); + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ClientFlavor.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ClientFlavor.java new file mode 100644 index 000000000000..54ddcd87e8e5 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/S3ClientFlavor.java @@ -0,0 +1,34 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression; + +public enum S3ClientFlavor { + STANDARD_SYNC(false), + STANDARD_ASYNC(true), + MULTIPART_ENABLED(true), + CRT_BASED(true) + ; + + private final boolean async; + + private S3ClientFlavor(boolean async) { + this.async = async; + } + + public boolean isAsync() { + return async; + } +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestCallable.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestCallable.java new file mode 100644 index 000000000000..2758cef6c9d3 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestCallable.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.regression; + +import java.util.concurrent.Callable; +import software.amazon.awssdk.utils.SdkAutoCloseable; + +public class TestCallable { + private SdkAutoCloseable client; + private Callable runnable; + + public TestCallable(SdkAutoCloseable client, Callable runnable) { + this.client = client; + this.runnable = runnable; + } + + public SdkAutoCloseable client() { + return client; + } + + public Callable runnable() { + return runnable; + } +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestConfig.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestConfig.java new file mode 100644 index 000000000000..b45d55617e6f --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/TestConfig.java @@ -0,0 +1,94 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.regression; + +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; + +public class TestConfig { + private S3ClientFlavor flavor; + private BucketType bucketType; + private boolean forcePathStyle; + private RequestChecksumCalculation requestChecksumValidation; + + public S3ClientFlavor getFlavor() { + return flavor; + } + + public void setFlavor(S3ClientFlavor flavor) { + this.flavor = flavor; + } + + public BucketType getBucketType() { + return bucketType; + } + + public void setBucketType(BucketType bucketType) { + this.bucketType = bucketType; + } + + public boolean isForcePathStyle() { + return forcePathStyle; + } + + public void setForcePathStyle(boolean forcePathStyle) { + this.forcePathStyle = forcePathStyle; + } + + public RequestChecksumCalculation getRequestChecksumValidation() { + return requestChecksumValidation; + } + + public void setRequestChecksumValidation(RequestChecksumCalculation requestChecksumValidation) { + this.requestChecksumValidation = requestChecksumValidation; + } + + @Override + public String toString() { + return "[" + + "flavor=" + flavor + + ", bucketType=" + bucketType + + ", forcePathStyle=" + forcePathStyle + + ", requestChecksumValidation=" + requestChecksumValidation + + ']'; + } + + public static List testConfigs() { + List configs = new ArrayList<>(); + + boolean[] forcePathStyle = {true, false}; + RequestChecksumCalculation[] checksumValidations = {RequestChecksumCalculation.WHEN_REQUIRED, + RequestChecksumCalculation.WHEN_SUPPORTED}; + for (boolean pathStyle : forcePathStyle) { + for (RequestChecksumCalculation checksumValidation : checksumValidations) { + for (S3ClientFlavor flavor : S3ClientFlavor.values()) { + for (BucketType bucketType : BucketType.values()) { + TestConfig testConfig = new TestConfig(); + testConfig.setFlavor(flavor); + testConfig.setBucketType(bucketType); + 
testConfig.setForcePathStyle(pathStyle); + testConfig.setRequestChecksumValidation(checksumValidation); + configs.add(testConfig); + } + } + } + } + + return configs; + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadAsyncRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadAsyncRegressionTesting.java new file mode 100644 index 000000000000..770f45518500 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadAsyncRegressionTesting.java @@ -0,0 +1,150 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ClientFlavor.STANDARD_ASYNC; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.regression.BucketType; +import software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils; +import software.amazon.awssdk.services.s3.regression.TestCallable; +import software.amazon.awssdk.utils.Logger; + +public class UploadAsyncRegressionTesting extends UploadStreamingRegressionTesting { + private static final Logger LOG = Logger.loggerFor(UploadAsyncRegressionTesting.class); + + public static List testConfigs() { + return UploadConfig.testConfigs(); + } + + @ParameterizedTest + @MethodSource("testConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void putObject(UploadConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config); + + // For testing purposes, ContentProvider is Publisher for async clients + // There is no way to create AsyncRequestBody with a Publisher and also provide the content length + 
Assumptions.assumeFalse(config.getBodyType() == BodyType.CONTENT_PROVIDER_WITH_LENGTH, + "No way to create AsyncRequestBody by giving both an Publisher and the content length"); + + // Payload signing doesn't work correctly for async java based + // TODO(sra-identity-auth) remove when chunked encoding support is added in async code path + Assumptions.assumeFalse(config.isPayloadSigning() + // MRAP requires body signing + || config.getBucketType() == BucketType.MRAP, + "Async payload signing doesn't work with Java based clients"); + + // For testing purposes, ContentProvider is Publisher for async clients + // Async java based clients don't currently support unknown content-length bodies + Assumptions.assumeFalse(config.getBodyType() == BodyType.CONTENT_PROVIDER_NO_LENGTH + || config.getBodyType() == BodyType.INPUTSTREAM_NO_LENGTH, + "Async Java based support unknown content length"); + + LOG.info(() -> "Running UploadAsyncRegressionTesting putObject with config: " + config); + + BucketType bucketType = config.getBucketType(); + + String bucket = bucketForType(bucketType); + String key = S3ChecksumsTestUtils.randomKey(); + + PutObjectRequest request = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + + RequestRecorder recorder = new RequestRecorder(); + + ClientOverrideConfiguration.Builder overrideConfiguration = + ClientOverrideConfiguration.builder() + .addExecutionInterceptor(recorder) + .apiCallTimeout(Duration.of(30, ChronoUnit.SECONDS)); + + if (config.isPayloadSigning()) { + overrideConfiguration.addExecutionInterceptor(new EnablePayloadSigningInterceptor()); + } + + TestCallable callable = null; + try { + + Long actualContentLength = null; + boolean requestBodyHasContentLength = false; + String actualCrc32; + + TestAsyncBody body = getAsyncRequestBody(config.getBodyType(), config.getContentSize()); + callable = callPutObject(request, STANDARD_ASYNC, body, config, overrideConfiguration.build()); + actualContentLength = 
body.getActualContentLength(); + requestBodyHasContentLength = body.getAsyncRequestBody().contentLength().isPresent(); + actualCrc32 = body.getChecksum(); + + PutObjectResponse response = callable.runnable().call(); + + recordObjectToCleanup(bucketType, key); + + // We only validate when configured to WHEN_SUPPORTED since checksums are optional for PutObject + // CRT switches to MPU under the hood which doesn't support checksums + if (config.getRequestChecksumValidation() == RequestChecksumCalculation.WHEN_SUPPORTED) { + assertThat(response.checksumCRC32()).isEqualTo(actualCrc32); + } + + assertThat(recorder.getRequests()).isNotEmpty(); + + for (SdkHttpRequest httpRequest : recorder.getRequests()) { + // skip any non-PUT requests, e.g. GetSession for EOZ requests + if (httpRequest.method() != SdkHttpMethod.PUT) { + continue; + } + + String payloadSha = httpRequest.firstMatchingHeader("x-amz-content-sha256").get(); + if (payloadSha.startsWith("STREAMING")) { + String decodedContentLength = httpRequest.firstMatchingHeader("x-amz-decoded-content-length").get(); + assertThat(Long.parseLong(decodedContentLength)).isEqualTo(actualContentLength); + verifyChecksumResponsePayload(config, key, actualCrc32); + } else { + Optional contentLength = httpRequest.firstMatchingHeader("Content-Length"); + if (requestBodyHasContentLength) { + assertThat(Long.parseLong(contentLength.get())).isEqualTo(actualContentLength); + } + } + } + } catch (Exception e) { + LOG.info(() -> String.format("Error while executing %s. 
Error message: %s", config, e.getMessage())); + throw e; + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadConfig.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadConfig.java new file mode 100644 index 000000000000..308558db4999 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadConfig.java @@ -0,0 +1,116 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import java.util.ArrayList; +import java.util.List; +import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; +import software.amazon.awssdk.services.s3.regression.BucketType; +import software.amazon.awssdk.utils.ToString; + +public class UploadConfig { + private BucketType bucketType; + private boolean forcePathStyle; + private RequestChecksumCalculation requestChecksumValidation; + private UploadStreamingRegressionTesting.BodyType bodyType; + private UploadStreamingRegressionTesting.ContentSize contentSize; + private boolean payloadSigning; + + public static List testConfigs() { + List configs = new ArrayList<>(); + + boolean[] payloadSign = {true, false}; + RequestChecksumCalculation[] checksumValidations = {RequestChecksumCalculation.WHEN_REQUIRED, + RequestChecksumCalculation.WHEN_SUPPORTED}; + for (RequestChecksumCalculation checksumValidation : checksumValidations) { + for (UploadStreamingRegressionTesting.BodyType bodType : UploadStreamingRegressionTesting.BodyType.values()) { + for (UploadStreamingRegressionTesting.ContentSize cs : + UploadStreamingRegressionTesting.ContentSize.values()) { + for (boolean ps : payloadSign) { + UploadConfig testConfig = new UploadConfig(); + testConfig.setRequestChecksumValidation(checksumValidation); + testConfig.setBodyType(bodType); + testConfig.setContentSize(cs); + testConfig.setPayloadSigning(ps); + testConfig.setBucketType(BucketType.STANDARD_BUCKET); + configs.add(testConfig); + } + } + } + } + return configs; + } + + public BucketType getBucketType() { + return bucketType; + } + + public void setBucketType(BucketType bucketType) { + this.bucketType = bucketType; + } + + public boolean isForcePathStyle() { + return forcePathStyle; + } + + public void setForcePathStyle(boolean forcePathStyle) { + this.forcePathStyle = forcePathStyle; + } + + public RequestChecksumCalculation getRequestChecksumValidation() { + return 
requestChecksumValidation; + } + + public void setRequestChecksumValidation(RequestChecksumCalculation requestChecksumValidation) { + this.requestChecksumValidation = requestChecksumValidation; + } + + public UploadStreamingRegressionTesting.BodyType getBodyType() { + return bodyType; + } + + public void setBodyType(UploadStreamingRegressionTesting.BodyType bodyType) { + this.bodyType = bodyType; + } + + public UploadStreamingRegressionTesting.ContentSize getContentSize() { + return contentSize; + } + + public void setContentSize(UploadStreamingRegressionTesting.ContentSize contentSize) { + this.contentSize = contentSize; + } + + public boolean isPayloadSigning() { + return payloadSigning; + } + + public void setPayloadSigning(boolean payloadSigning) { + this.payloadSigning = payloadSigning; + } + + @Override + public String toString() { + return ToString.builder("FlattenUploadConfig") + .add("bucketType", bucketType) + .add("forcePathStyle", forcePathStyle) + .add("requestChecksumValidation", requestChecksumValidation) + .add("bodyType", bodyType) + .add("contentSize", contentSize) + .add("payloadSigning", payloadSigning) + .build(); + } +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadCrtRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadCrtRegressionTesting.java new file mode 100644 index 000000000000..23f255d4d789 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadCrtRegressionTesting.java @@ -0,0 +1,105 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ClientFlavor.CRT_BASED; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.regression.BucketType; +import software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils; +import software.amazon.awssdk.services.s3.regression.TestCallable; +import software.amazon.awssdk.utils.Logger; + +public class UploadCrtRegressionTesting extends UploadStreamingRegressionTesting { + private static final Logger LOG = Logger.loggerFor(UploadCrtRegressionTesting.class); + + public static List testConfigs() { + return UploadConfig.testConfigs(); + } + + @ParameterizedTest + @MethodSource("testConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void putObject(UploadConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config); + + Assumptions.assumeFalse(config.getBodyType() == BodyType.CONTENT_PROVIDER_WITH_LENGTH, + "No way to create AsyncRequestBody by giving both an Publisher and the content length"); + 
+ LOG.info(() -> "Running UploadCrtRegressionTesting putObject with config: " + config); + + BucketType bucketType = config.getBucketType(); + + String bucket = bucketForType(bucketType); + String key = S3ChecksumsTestUtils.randomKey(); + + PutObjectRequest request = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + + RequestRecorder recorder = new RequestRecorder(); + + ClientOverrideConfiguration.Builder overrideConfiguration = + ClientOverrideConfiguration.builder() + .addExecutionInterceptor(recorder) + .apiCallTimeout(Duration.of(30, ChronoUnit.SECONDS)); + + if (config.isPayloadSigning()) { + overrideConfiguration.addExecutionInterceptor(new EnablePayloadSigningInterceptor()); + } + + TestCallable callable = null; + try { + + + TestAsyncBody body = getAsyncRequestBody(config.getBodyType(), config.getContentSize()); + callable = callPutObject(request, CRT_BASED, body, config, overrideConfiguration.build()); + String actualCrc32 = body.getChecksum(); + + PutObjectResponse response = callable.runnable().call(); + + recordObjectToCleanup(bucketType, key); + + if (response.checksumCRC32() != null && !response.checksumCRC32().isEmpty()) { + assertThat(actualCrc32).isEqualTo(response.checksumCRC32()); + } else { + LOG.info(() -> "Skipping checksum for config " + config); + } + } catch (Exception e) { + LOG.info(() -> String.format("Error while executing %s. 
Error message: %s", config, e.getMessage())); + throw e; + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadStreamingRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadStreamingRegressionTesting.java new file mode 100644 index 000000000000..151dd6b02192 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadStreamingRegressionTesting.java @@ -0,0 +1,580 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.crc32; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.makeAsyncClient; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.makeSyncClient; + +import io.reactivex.Flowable; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.function.Consumer; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeAll; +import software.amazon.awssdk.auth.signer.S3SignerExecutionAttribute; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.async.BlockingInputStreamAsyncRequestBody; +import software.amazon.awssdk.core.async.BlockingOutputStreamAsyncRequestBody; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.http.SdkHttpRequest; +import 
software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.ChecksumMode; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.regression.BaseS3RegressionTest; +import software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils; +import software.amazon.awssdk.services.s3.regression.S3ClientFlavor; +import software.amazon.awssdk.services.s3.regression.TestCallable; +import software.amazon.awssdk.transfer.s3.S3TransferManager; +import software.amazon.awssdk.transfer.s3.model.CompletedUpload; +import software.amazon.awssdk.transfer.s3.model.Upload; +import software.amazon.awssdk.utils.CancellableOutputStream; +import software.amazon.awssdk.utils.CompletableFutureUtils; +import software.amazon.awssdk.utils.FunctionalUtils; +import software.amazon.awssdk.utils.Logger; + +public class UploadStreamingRegressionTesting extends BaseS3RegressionTest { + private static final Logger LOG = Logger.loggerFor(UploadStreamingRegressionTesting.class); + + private static final ExecutorService ASYNC_REQUEST_BODY_EXECUTOR = Executors.newSingleThreadExecutor(); + + static final byte[] smallContent = "Hello world".getBytes(StandardCharsets.UTF_8); + static final byte[] largeContent = largeContent(); + static final String smallContentCrc32 = crc32(smallContent); + static final String largeContentCrc32 = crc32(largeContent); + + static String smallContentCRC32ForBuffersAPI; + static String largeContentCRC32ForBuffersAPI; + + protected static Path testFileSmall; + protected static Path testFileLarge; + + @BeforeAll + static void setupClass() throws IOException { + testFileSmall = S3ChecksumsTestUtils.createRandomFile16KB(); + testFileLarge = S3ChecksumsTestUtils.createRandomFile60MB(); + + // used in 
RequestBody.*buffers(...) API + // we calculate crc32 once to try to accelerate test execution + byte[] crcArraySmallContentForBuffersApi = new byte[smallContent.length + smallContent.length]; + System.arraycopy(smallContent, 0, crcArraySmallContentForBuffersApi, 0, smallContent.length); + System.arraycopy(smallContent, 0, crcArraySmallContentForBuffersApi, smallContent.length, smallContent.length); + smallContentCRC32ForBuffersAPI = crc32(crcArraySmallContentForBuffersApi); + + byte[] crcArrayLargeContentForBuffersApi = new byte[largeContent.length + largeContent.length]; + System.arraycopy(largeContent, 0, crcArrayLargeContentForBuffersApi, 0, largeContent.length); + System.arraycopy(largeContent, 0, crcArrayLargeContentForBuffersApi, largeContent.length, largeContent.length); + largeContentCRC32ForBuffersAPI = crc32(crcArrayLargeContentForBuffersApi); + } + + @AfterAll + public static void cleanup() { + ASYNC_REQUEST_BODY_EXECUTOR.shutdownNow(); + } + + protected void verifyChecksumResponsePayload(UploadConfig config, String key, String expectedCRC32) { + String bucket = bucketForType(config.getBucketType()); + ResponseInputStream response = s3.getObject(req -> req.checksumMode(ChecksumMode.ENABLED) + .key(key) + .bucket(bucket)); + String crc32 = response.response().checksumCRC32(); + if (crc32 != null) { + assertThat(crc32).isEqualTo(expectedCRC32); + } + + } + + protected TestCallable callPutObject(PutObjectRequest request, + TestRequestBody requestBody, + UploadConfig config, + ClientOverrideConfiguration overrideConfiguration) { + S3Client s3Client = makeSyncClient(config, overrideConfiguration, REGION, CREDENTIALS_PROVIDER_CHAIN); + Callable callable = () -> { + try { + return s3Client.putObject(request, requestBody); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + s3Client.close(); + } + }; + return new TestCallable<>(s3Client, callable); + } + + protected TestCallable callPutObject(PutObjectRequest request, + S3ClientFlavor 
flavor, + TestAsyncBody requestBody, + UploadConfig config, + ClientOverrideConfiguration overrideConfiguration) { + S3AsyncClient s3Client = makeAsyncClient(config, flavor, overrideConfiguration, REGION, CREDENTIALS_PROVIDER_CHAIN); + Callable callable = () -> { + try { + AsyncRequestBody asyncRequestBody = requestBody.getAsyncRequestBody(); + CompletableFuture future = s3Client.putObject(request, asyncRequestBody); + performWriteIfNeeded(requestBody); + return CompletableFutureUtils.joinLikeSync(future); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + s3Client.close(); + } + }; + return new TestCallable<>(s3Client, callable); + } + + protected TestCallable callTmUpload(PutObjectRequest request, + S3ClientFlavor flavor, + TestAsyncBody requestBody, + UploadConfig config, + ClientOverrideConfiguration overrideConfiguration) { + S3TransferManager transferManager = S3ChecksumsTestUtils.makeTm(config, flavor, overrideConfiguration, + REGION, CREDENTIALS_PROVIDER_CHAIN); + Callable callable = () -> { + try { + Upload upload = transferManager.upload( + r -> r.requestBody(requestBody.getAsyncRequestBody()).putObjectRequest(request)); + performWriteIfNeeded(requestBody); + CompletedUpload completedUpload = CompletableFutureUtils.joinLikeSync(upload.completionFuture()); + return completedUpload.response(); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + transferManager.close(); + } + }; + return new TestCallable<>(transferManager, callable); + } + + protected TestRequestBody getRequestBody(BodyType bodyType, ContentSize contentSize) throws IOException { + switch (bodyType) { + case STRING: { + String content = contentSize.stringContent(); + return new TestRequestBody(RequestBody.fromString(content), + content.getBytes(StandardCharsets.UTF_8).length, + crc32(content)); + } + case FILE: + return new TestRequestBody(RequestBody.fromFile(contentSize.fileContent()), + Files.size(contentSize.fileContent()), 
crc32(contentSize.fileContent())); + case CONTENT_PROVIDER_NO_LENGTH: { + RequestBody wrapped = + RequestBody.fromContentProvider(() -> FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentSize.fileContent())), + "application/octet-stream"); + + return new TestRequestBody(wrapped, Files.size(contentSize.fileContent()), crc32(contentSize.fileContent())); + } + case CONTENT_PROVIDER_WITH_LENGTH: { + long contentLength = Files.size(contentSize.fileContent()); + RequestBody wrapped = + RequestBody.fromContentProvider(() -> FunctionalUtils.invokeSafely(() -> Files.newInputStream(contentSize.fileContent())), + Files.size(contentSize.fileContent()), + "application/octet-stream"); + return new TestRequestBody(wrapped, contentLength, crc32(contentSize.fileContent())); + } + case INPUTSTREAM_RESETABLE: { + byte[] content = contentSize.byteContent(); + RequestBody wrapped = RequestBody.fromInputStream(new ByteArrayInputStream(content), content.length); + return new TestRequestBody(wrapped, content.length, contentSize.precalculatedCrc32()); + } + case INPUTSTREAM_NOT_RESETABLE: { + byte[] content = contentSize.byteContent(); + RequestBody wrapped = RequestBody.fromInputStream(new NonResettableByteStream(content), content.length); + return new TestRequestBody(wrapped, content.length, contentSize.precalculatedCrc32()); + } + case BYTES: { + byte[] content = contentSize.byteContent(); + RequestBody wrapped = RequestBody.fromBytes(content); + return new TestRequestBody(wrapped, content.length, contentSize.precalculatedCrc32()); + } + case BYTE_BUFFER: { + byte[] content = contentSize.byteContent(); + RequestBody wrapped = RequestBody.fromByteBuffer(ByteBuffer.wrap(content)); + return new TestRequestBody(wrapped, content.length, contentSize.precalculatedCrc32()); + } + case REMAINING_BYTE_BUFFER: { + byte[] content = contentSize.byteContent(); + ByteBuffer buff = ByteBuffer.wrap(content); + int offset = 2; + buff.position(offset); + RequestBody asyncRequestBody = 
RequestBody.fromRemainingByteBuffer(buff); + byte[] crcArray = Arrays.copyOfRange(content, offset, content.length); + return new TestRequestBody(asyncRequestBody, content.length - offset, crc32(crcArray)); + } + case BUFFERS: + case BUFFERS_REMAINING: + case BLOCKING_INPUT_STREAM: + case BLOCKING_OUTPUT_STREAM: + case INPUTSTREAM_NO_LENGTH: + Assumptions.abort("Test BodyType not supported for sync client: " + bodyType); + default: + throw new RuntimeException("Unsupported body type: " + bodyType); + } + } + + protected TestAsyncBody getAsyncRequestBody(BodyType bodyType, ContentSize contentSize) throws IOException { + switch (bodyType) { + case STRING: { + String content = contentSize.stringContent(); + return new TestAsyncBody(AsyncRequestBody.fromString(content), content.getBytes(StandardCharsets.UTF_8).length, + crc32(content), bodyType); + } + case FILE: { + long contentLength = Files.size(contentSize.fileContent()); + return new TestAsyncBody(AsyncRequestBody.fromFile(contentSize.fileContent()), contentLength, + crc32(contentSize.fileContent()), bodyType); + } + case INPUTSTREAM_RESETABLE: { + byte[] content = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromInputStream(new ByteArrayInputStream(content), + (long) content.length, + ASYNC_REQUEST_BODY_EXECUTOR); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case INPUTSTREAM_NOT_RESETABLE: { + byte[] content = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromInputStream(new NonResettableByteStream(content), + (long) content.length, + ASYNC_REQUEST_BODY_EXECUTOR); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case INPUTSTREAM_NO_LENGTH: { + byte[] content = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody + .fromInputStream(conf -> conf.inputStream(new 
ByteArrayInputStream(content)) + .executor(ASYNC_REQUEST_BODY_EXECUTOR)); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case CONTENT_PROVIDER_NO_LENGTH: { + byte[] content = contentSize.byteContent(); + Flowable publisher = Flowable.just(ByteBuffer.wrap(content)); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromPublisher(publisher); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case BYTES: { + byte[] content = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromBytes(content); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case BYTE_BUFFER: { + byte[] content = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromByteBuffer(ByteBuffer.wrap(content)); + return new TestAsyncBody(asyncRequestBody, content.length, contentSize.precalculatedCrc32(), bodyType); + } + case REMAINING_BYTE_BUFFER: { + byte[] content = contentSize.byteContent(); + ByteBuffer buff = ByteBuffer.wrap(content); + int offset = 2; + buff.position(offset); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromRemainingByteBuffer(buff); + byte[] crcArray = Arrays.copyOfRange(content, offset, content.length); + return new TestAsyncBody(asyncRequestBody, content.length - offset, crc32(crcArray), bodyType); + } + case BUFFERS: { + byte[] content1 = contentSize.byteContent(); + byte[] content2 = contentSize.byteContent(); + AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromByteBuffers(ByteBuffer.wrap(content1), + ByteBuffer.wrap(content2)); + return new TestAsyncBody(asyncRequestBody, + content1.length + content2.length, + contentSize.precalculatedCrc32forBuffersAPI(), + bodyType); + } + case BUFFERS_REMAINING: { + byte[] content1 = contentSize.byteContent(); + byte[] content2 = contentSize.byteContent(); + 
AsyncRequestBody asyncRequestBody = AsyncRequestBody.fromRemainingByteBuffers(ByteBuffer.wrap(content1), + ByteBuffer.wrap(content2)); + byte[] crcArray = new byte[content2.length + content2.length]; + System.arraycopy(content1, 0, crcArray, 0, content1.length); + System.arraycopy(content2, 0, crcArray, content1.length, content2.length); + return new TestAsyncBody(asyncRequestBody, + content1.length + content2.length, + contentSize.precalculatedCrc32forBuffersAPI(), + bodyType); + } + case BLOCKING_INPUT_STREAM: { + byte[] content = contentSize.byteContent(); + long streamToSendLength = content.length; + BlockingInputStreamAsyncRequestBody body = AsyncRequestBody.forBlockingInputStream(streamToSendLength); + return new TestAsyncBodyForBlockingInputStream(body, + new ByteArrayInputStream(content), + content.length, + contentSize.precalculatedCrc32(), + bodyType); + } + case BLOCKING_OUTPUT_STREAM: { + byte[] content = contentSize.byteContent(); + long streamToSendLength = content.length; + BlockingOutputStreamAsyncRequestBody body = AsyncRequestBody.forBlockingOutputStream(streamToSendLength); + Consumer bodyWrite = outputStream -> { + try { + outputStream.write(content); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + }; + return new TestAsyncBodyForBlockingOutputStream(body, + bodyWrite, + content.length, + contentSize.precalculatedCrc32(), + bodyType); + } + default: + throw new RuntimeException("Unsupported async body type: " + bodyType); + } + } + + void performWriteIfNeeded(TestAsyncBody requestBody) throws IOException { + if (requestBody.bodyType == BodyType.BLOCKING_INPUT_STREAM) { + BlockingInputStreamAsyncRequestBody body = (BlockingInputStreamAsyncRequestBody) requestBody.asyncRequestBody; + InputStream inputStream = ((TestAsyncBodyForBlockingInputStream) requestBody).inputStream; + body.writeInputStream(inputStream); + inputStream.close(); + } + if (requestBody.bodyType == BodyType.BLOCKING_OUTPUT_STREAM) { + 
TestAsyncBodyForBlockingOutputStream body = (TestAsyncBodyForBlockingOutputStream) requestBody; + CancellableOutputStream outputStream = + ((BlockingOutputStreamAsyncRequestBody) body.getAsyncRequestBody()).outputStream(); + body.bodyWrite.accept(outputStream); + outputStream.close(); + } + } + + protected enum BodyType { + INPUTSTREAM_RESETABLE, + INPUTSTREAM_NOT_RESETABLE, + INPUTSTREAM_NO_LENGTH, + + STRING, + + FILE, + + CONTENT_PROVIDER_WITH_LENGTH, + + CONTENT_PROVIDER_NO_LENGTH, + + BYTES, + BYTE_BUFFER, + REMAINING_BYTE_BUFFER, + + BUFFERS, + BUFFERS_REMAINING, + + BLOCKING_INPUT_STREAM, + BLOCKING_OUTPUT_STREAM + } + + protected enum ContentSize { + SMALL, + LARGE; + + byte[] byteContent() { + switch (this) { + case SMALL: + return smallContent; + case LARGE: + return largeContent; + default: + throw new IllegalArgumentException("not supported ContentSize " + this); + } + } + + String stringContent() { + switch (this) { + case SMALL: + return "Hello World!"; + case LARGE: + return new String(largeContent(), StandardCharsets.UTF_8); + default: + throw new IllegalArgumentException("not supported ContentSize " + this); + } + } + + Path fileContent() { + switch (this) { + case SMALL: + return testFileSmall; + case LARGE: + return testFileLarge; + default: + throw new IllegalArgumentException("not supported ContentSize " + this); + } + } + + String precalculatedCrc32() { + switch (this) { + case SMALL: + return smallContentCrc32; + case LARGE: + return largeContentCrc32; + default: + throw new IllegalArgumentException("not supported ContentSize " + this); + } + } + + String precalculatedCrc32forBuffersAPI() { + switch (this) { + case SMALL: + return smallContentCRC32ForBuffersAPI; + case LARGE: + return largeContentCRC32ForBuffersAPI; + default: + throw new IllegalArgumentException("not supported ContentSize " + this); + } + } + } + + private static byte[] largeContent() { + // 60 MiB + Random r = new Random(); + byte[] b = new byte[60 * 1024 * 1024]; + 
r.nextBytes(b); + return b; + } + + protected static class TestRequestBody extends RequestBody { + private final long contentLength; + private final String checksum; + + protected TestRequestBody(RequestBody wrapped, long contentLength, String checksum) { + super(wrapped.contentStreamProvider(), wrapped.optionalContentLength().orElse(null), wrapped.contentType()); + this.contentLength = contentLength; + this.checksum = checksum; + } + + public long getActualContentLength() { + return contentLength; + } + + public String getChecksum() { + return checksum; + } + } + + protected static class TestAsyncBody { + private final AsyncRequestBody asyncRequestBody; + private final long actualContentLength; + private final String checksum; + private final BodyType bodyType; + + private TestAsyncBody(AsyncRequestBody asyncRequestBody, long actualContentLength, String checksum, BodyType bodyType) { + this.asyncRequestBody = asyncRequestBody; + this.actualContentLength = actualContentLength; + this.checksum = checksum; + this.bodyType = bodyType; + } + + public AsyncRequestBody getAsyncRequestBody() { + return asyncRequestBody; + } + + public long getActualContentLength() { + return actualContentLength; + } + + public String getChecksum() { + return checksum; + } + } + + protected static class TestAsyncBodyForBlockingOutputStream extends TestAsyncBody { + private final Consumer bodyWrite; + + private TestAsyncBodyForBlockingOutputStream(AsyncRequestBody asyncRequestBody, + Consumer bodyWrite, + long actualContentLength, + String checksum, + BodyType bodyType) { + super(asyncRequestBody, actualContentLength, checksum, bodyType); + this.bodyWrite = bodyWrite; + } + } + + protected static class TestAsyncBodyForBlockingInputStream extends TestAsyncBody { + private final InputStream inputStream; + + private TestAsyncBodyForBlockingInputStream(AsyncRequestBody asyncRequestBody, + InputStream inputStream, + long actualContentLength, + String checksum, + BodyType bodyType) { + 
super(asyncRequestBody, actualContentLength, checksum, bodyType); + this.inputStream = inputStream; + } + } + + protected static class RequestRecorder implements ExecutionInterceptor { + private final List requests = new ArrayList<>(); + + @Override + public void beforeTransmission(Context.BeforeTransmission context, ExecutionAttributes executionAttributes) { + requests.add(context.httpRequest()); + } + + public List getRequests() { + return requests; + } + } + + protected static class EnablePayloadSigningInterceptor implements ExecutionInterceptor { + @Override + public void beforeExecution(Context.BeforeExecution context, ExecutionAttributes executionAttributes) { + executionAttributes.putAttribute(S3SignerExecutionAttribute.ENABLE_PAYLOAD_SIGNING, true); + ExecutionInterceptor.super.beforeExecution(context, executionAttributes); + } + } + + protected static class NonResettableByteStream extends ByteArrayInputStream { + public NonResettableByteStream(byte[] buf) { + super(buf); + } + + @Override + public boolean markSupported() { + return false; + } + + @Override + public synchronized void reset() { + throw new UnsupportedOperationException(); + } + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadSyncRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadSyncRegressionTesting.java new file mode 100644 index 000000000000..b855a61a5972 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadSyncRegressionTesting.java @@ -0,0 +1,131 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import static org.assertj.core.api.Assertions.assertThat; +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.regression.BucketType; +import software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils; +import software.amazon.awssdk.services.s3.regression.TestCallable; +import software.amazon.awssdk.utils.Logger; + +public class UploadSyncRegressionTesting extends UploadStreamingRegressionTesting { + private static final Logger LOG = Logger.loggerFor(UploadSyncRegressionTesting.class); + + public static List testConfigs() { + return UploadConfig.testConfigs(); + } + + @ParameterizedTest + @MethodSource("testConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void putObject(UploadConfig config) throws Exception { + assumeNotAccessPointWithPathStyle(config); + + // TODO connection acquire timeout when 
RequestBody.fromRemainingByteBuffer is used with RequestChecksumCalculation + // .WHEN_SUPPORTED + Assumptions.assumeFalse(config.getBodyType() == BodyType.REMAINING_BYTE_BUFFER + && config.getRequestChecksumValidation() == RequestChecksumCalculation.WHEN_SUPPORTED, + "TODO: investigate connection acquire timeout when using RequestBody.fromRemainingByteBuffer" + + " with RequestChecksumCalculation.WHEN_SUPPORTED"); + + LOG.info(() -> "Running putObject with config: " + config); + + BucketType bucketType = config.getBucketType(); + + String bucket = bucketForType(bucketType); + String key = S3ChecksumsTestUtils.randomKey(); + + PutObjectRequest request = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + + RequestRecorder recorder = new RequestRecorder(); + + ClientOverrideConfiguration.Builder overrideConfiguration = + ClientOverrideConfiguration.builder() + .addExecutionInterceptor(recorder) + .apiCallTimeout(Duration.of(30, ChronoUnit.SECONDS)); + + if (config.isPayloadSigning()) { + overrideConfiguration.addExecutionInterceptor(new EnablePayloadSigningInterceptor()); + } + + TestCallable callable = null; + try { + TestRequestBody body = getRequestBody(config.getBodyType(), config.getContentSize()); + callable = callPutObject(request, body, config, overrideConfiguration.build()); + Long actualContentLength = body.getActualContentLength(); + boolean requestBodyHasContentLength = body.optionalContentLength().isPresent(); + String actualCrc32 = body.getChecksum(); + PutObjectResponse response = callable.runnable().call(); + recordObjectToCleanup(bucketType, key); + + // We only validate when configured to WHEN_SUPPORTED since checksums are optional for PutObject + if (config.getRequestChecksumValidation() == RequestChecksumCalculation.WHEN_SUPPORTED + && response.checksumCRC32() != null) { + assertThat(response.checksumCRC32()).isEqualTo(actualCrc32); + } + + assertThat(recorder.getRequests()).isNotEmpty(); + + for (SdkHttpRequest httpRequest : 
recorder.getRequests()) { + // skip any non-PUT requests, e.g. GetSession for EOZ requests + if (httpRequest.method() != SdkHttpMethod.PUT) { + continue; + } + + String payloadSha = httpRequest.firstMatchingHeader("x-amz-content-sha256").get(); + if (payloadSha.startsWith("STREAMING")) { + String decodedContentLength = httpRequest.firstMatchingHeader("x-amz-decoded-content-length").get(); + assertThat(Long.parseLong(decodedContentLength)).isEqualTo(actualContentLength); + verifyChecksumResponsePayload(config, key, actualCrc32); + } else { + Optional contentLength = httpRequest.firstMatchingHeader("Content-Length"); + if (requestBodyHasContentLength) { + assertThat(Long.parseLong(contentLength.get())).isEqualTo(actualContentLength); + } + } + } + } catch (Exception e) { + LOG.info(() -> String.format("Error while executing %s. Error message: %s", config, e.getMessage())); + throw e; + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + +} diff --git a/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadTransferManagerRegressionTesting.java b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadTransferManagerRegressionTesting.java new file mode 100644 index 000000000000..91d1f0a68a40 --- /dev/null +++ b/test/s3-tests/src/it/java/software/amazon/awssdk/services/s3/regression/upload/UploadTransferManagerRegressionTesting.java @@ -0,0 +1,111 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.services.s3.regression.upload; + +import static software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils.assumeNotAccessPointWithPathStyle; +import static software.amazon.awssdk.services.s3.regression.S3ClientFlavor.MULTIPART_ENABLED; + +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.regression.BucketType; +import software.amazon.awssdk.services.s3.regression.S3ChecksumsTestUtils; +import software.amazon.awssdk.services.s3.regression.S3ClientFlavor; +import software.amazon.awssdk.services.s3.regression.TestCallable; +import software.amazon.awssdk.utils.Logger; + +public class UploadTransferManagerRegressionTesting extends UploadStreamingRegressionTesting { + private static final Logger LOG = Logger.loggerFor(UploadTransferManagerRegressionTesting.class); + + public static List testConfigs() { + return UploadConfig.testConfigs(); + } + + @ParameterizedTest + @MethodSource("testConfigs") + @Timeout(value = 120, unit = TimeUnit.SECONDS) + void putObject(UploadConfig config) throws Exception { + + assumeNotAccessPointWithPathStyle(config); + + // For testing purposes, ContentProvider is Publisher for async clients + // There is no way to create AsyncRequestBody with a Publisher and also provide the content length + S3ClientFlavor flavor = MULTIPART_ENABLED; + + 
Assumptions.assumeFalse(config.getBodyType() == BodyType.CONTENT_PROVIDER_WITH_LENGTH, + "No way to create AsyncRequestBody by giving both an Publisher and the content length"); + + // Payload signing doesn't work correctly for async java based + // TODO(sra-identity-auth) remove when chunked encoding support is added in async code path + // MRAP requires body signing + Assumptions.assumeFalse( + config.isPayloadSigning() || config.getBucketType() == BucketType.MRAP, + "Async payload signing doesn't work with Java based clients"); + + LOG.info(() -> "Running putObject with config: " + config); + + BucketType bucketType = config.getBucketType(); + + String bucket = bucketForType(bucketType); + String key = S3ChecksumsTestUtils.randomKey(); + + PutObjectRequest request = PutObjectRequest.builder() + .bucket(bucket) + .key(key) + .build(); + + + RequestRecorder recorder = new RequestRecorder(); + + ClientOverrideConfiguration.Builder overrideConfiguration = + ClientOverrideConfiguration.builder() + .addExecutionInterceptor(recorder) + .apiCallTimeout(Duration.of(30, ChronoUnit.SECONDS)); + + if (config.isPayloadSigning()) { + overrideConfiguration.addExecutionInterceptor(new EnablePayloadSigningInterceptor()); + } + + TestCallable callable = null; + try { + + TestAsyncBody body = getAsyncRequestBody(config.getBodyType(), config.getContentSize()); + callable = callTmUpload(request, flavor, body, config, overrideConfiguration.build()); + String actualCrc32 = body.getChecksum(); + + PutObjectResponse response = callable.runnable().call(); + + recordObjectToCleanup(bucketType, key); + } catch (Exception e) { + LOG.info(() -> String.format("Error while executing %s. 
Error message: %s", config, e.getMessage())); + throw e; + } finally { + if (callable != null) { + callable.client().close(); + } + } + } + + +} diff --git a/test/s3-tests/src/it/resources/log4j2.xml b/test/s3-tests/src/it/resources/log4j2.xml index 0ae8e7c45738..e1d4b93a676d 100644 --- a/test/s3-tests/src/it/resources/log4j2.xml +++ b/test/s3-tests/src/it/resources/log4j2.xml @@ -24,6 +24,7 @@ + @@ -31,4 +32,4 @@ - \ No newline at end of file + diff --git a/test/s3-tests/src/test/resources/log4j2.xml b/test/s3-tests/src/test/resources/log4j2.xml index 82f3e09ef895..cf0f78f4ca54 100644 --- a/test/s3-tests/src/test/resources/log4j2.xml +++ b/test/s3-tests/src/test/resources/log4j2.xml @@ -24,12 +24,13 @@ + - + diff --git a/test/sdk-benchmarks/pom.xml b/test/sdk-benchmarks/pom.xml index f8bc89724824..13c633dfef77 100644 --- a/test/sdk-benchmarks/pom.xml +++ b/test/sdk-benchmarks/pom.xml @@ -19,7 +19,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml diff --git a/test/sdk-native-image-test/pom.xml b/test/sdk-native-image-test/pom.xml index 849a287fbe21..c73f12bd4788 100644 --- a/test/sdk-native-image-test/pom.xml +++ b/test/sdk-native-image-test/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/service-test-utils/pom.xml b/test/service-test-utils/pom.xml index 9b4c39db38da..37a8293ef861 100644 --- a/test/service-test-utils/pom.xml +++ b/test/service-test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml service-test-utils diff --git a/test/stability-tests/pom.xml b/test/stability-tests/pom.xml index f0e6d2ee209e..9ed9880cddc0 100644 --- a/test/stability-tests/pom.xml +++ b/test/stability-tests/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/test-utils/pom.xml 
b/test/test-utils/pom.xml index 02d270216e54..1b316055f004 100644 --- a/test/test-utils/pom.xml +++ b/test/test-utils/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml test-utils diff --git a/test/tests-coverage-reporting/pom.xml b/test/tests-coverage-reporting/pom.xml index 98bf44c1de80..869694b40036 100644 --- a/test/tests-coverage-reporting/pom.xml +++ b/test/tests-coverage-reporting/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/test/v2-migration-tests/pom.xml b/test/v2-migration-tests/pom.xml index abc1b4d02ebb..f42d7bc9a057 100644 --- a/test/v2-migration-tests/pom.xml +++ b/test/v2-migration-tests/pom.xml @@ -22,7 +22,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../.. @@ -163,6 +163,17 @@ true + + maven-surefire-plugin + + + ${v2.migration.tests.skip} + + ${argLine} + false + 2 + + diff --git a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java index 03a160657e8c..c4971ff63b1e 100644 --- a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java +++ b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/after/src/main/java/foo/bar/TransferManagerS3.java @@ -15,31 +15,48 @@ package foo.bar; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; import 
software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.transfer.s3.S3TransferManager; import software.amazon.awssdk.transfer.s3.model.Copy; import software.amazon.awssdk.transfer.s3.model.CopyRequest; +import software.amazon.awssdk.transfer.s3.model.DirectoryDownload; +import software.amazon.awssdk.transfer.s3.model.DirectoryUpload; +import software.amazon.awssdk.transfer.s3.model.DownloadDirectoryRequest; import software.amazon.awssdk.transfer.s3.model.DownloadFileRequest; import software.amazon.awssdk.transfer.s3.model.FileDownload; +import software.amazon.awssdk.transfer.s3.model.FileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileDownload; +import software.amazon.awssdk.transfer.s3.model.ResumableFileUpload; +import software.amazon.awssdk.transfer.s3.model.ResumableTransfer; +import software.amazon.awssdk.transfer.s3.model.UploadDirectoryRequest; import software.amazon.awssdk.transfer.s3.model.UploadFileRequest; import software.amazon.awssdk.transfer.s3.model.UploadRequest; +import software.amazon.awssdk.transfer.s3.progress.TransferProgress; import java.io.File; +import java.io.IOException; +import java.io.OutputStream; import java.time.Duration; public class TransferManagerS3 { File file = new File("path/to/file.txt"); - void tmConstructor() { + void tmConstructor(AwsCredentials credentials, AwsCredentialsProvider credentialsProvider) { S3TransferManager tm = S3TransferManager.builder() .build(); S3TransferManager tmBuilderDefault = S3TransferManager.create(); S3TransferManager tmBuilderWithS3 = S3TransferManager.builder().build(); + S3TransferManager tmConstructorWithCred = 
S3TransferManager.builder().s3Client(S3AsyncClient.builder().credentialsProvider(StaticCredentialsProvider.create(credentials)).build()).build(); + S3TransferManager tmConstructorWithCredProvider = S3TransferManager.builder().s3Client(S3AsyncClient.builder().credentialsProvider(credentialsProvider).build()).build(); } void download(S3TransferManager tm, String bucket, String key) { @@ -76,4 +93,25 @@ void copy(S3TransferManager tm, String sourceBucket, String sourceKey, String de .build(); Copy copy2 = tm.copy(CopyRequest.builder().copyObjectRequest(copyRequest).build()); } -} + + void downloadDirectory(S3TransferManager tm, File destination) { + DirectoryDownload fileDownload = tm.downloadDirectory(DownloadDirectoryRequest.builder().bucket("bucket").listObjectsV2RequestTransformer(builder -> builder.prefix("key")).destination(destination.toPath()).build()); + tm.close(); + } + + void uploadDirectory(S3TransferManager tm) { + DirectoryUpload fileUpload1 = tm.uploadDirectory(UploadDirectoryRequest.builder().bucket("bucket").s3Prefix("prefix").source(file.toPath()).maxDepth(true ? 
Integer.MAX_VALUE : 1).build()); + } + + void resume(S3TransferManager tm, ResumableFileDownload persistableDownload, ResumableFileUpload persistableUpload) { + FileDownload download = tm.resumeDownloadFile(persistableDownload); + FileUpload upload = tm.resumeUploadFile(persistableUpload); + } + + void POJO_methods(ResumableTransfer transfer, OutputStream outputStream, TransferProgress progress) throws IOException { + String s = transfer.serializeToString(); + transfer.serializeToOutputStream(outputStream); + + long bytesTransferred = progress.snapshot().transferredBytes(); + } +} \ No newline at end of file diff --git a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java index 49975abfefed..8657cfd51f5c 100644 --- a/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java +++ b/test/v2-migration-tests/src/test/resources/software/amazon/awssdk/v2migrationtests/maven-tm/before/src/main/java/foo/bar/TransferManagerS3.java @@ -15,23 +15,36 @@ package foo.bar; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.transfer.Copy; import com.amazonaws.services.s3.transfer.Download; +import com.amazonaws.services.s3.transfer.MultipleFileDownload; +import com.amazonaws.services.s3.transfer.MultipleFileUpload; +import com.amazonaws.services.s3.transfer.PersistableDownload; +import com.amazonaws.services.s3.transfer.PersistableTransfer; +import com.amazonaws.services.s3.transfer.PersistableUpload; import 
com.amazonaws.services.s3.transfer.TransferManager; import com.amazonaws.services.s3.transfer.TransferManagerBuilder; +import com.amazonaws.services.s3.transfer.TransferProgress; +import com.amazonaws.services.s3.transfer.Upload; import java.io.File; +import java.io.IOException; +import java.io.OutputStream; public class TransferManagerS3 { File file = new File("path/to/file.txt"); - void tmConstructor() { + void tmConstructor(AWSCredentials credentials, AWSCredentialsProvider credentialsProvider) { TransferManager tm = new TransferManager(); TransferManager tmBuilderDefault = TransferManagerBuilder.defaultTransferManager(); TransferManager tmBuilderWithS3 = TransferManagerBuilder.standard().build(); + TransferManager tmConstructorWithCred = new TransferManager(credentials); + TransferManager tmConstructorWithCredProvider = new TransferManager(credentialsProvider); } void download(TransferManager tm, String bucket, String key) { @@ -64,4 +77,25 @@ void copy(TransferManager tm, String sourceBucket, String sourceKey, String dest CopyObjectRequest copyRequest = new CopyObjectRequest(sourceBucket, sourceKey, destinationBucket, destinationKey); Copy copy2 = tm.copy(copyRequest); } -} + + void downloadDirectory(TransferManager tm, File destination) { + MultipleFileDownload fileDownload = tm.downloadDirectory("bucket", "key", destination); + tm.shutdownNow(); + } + + void uploadDirectory(TransferManager tm) { + MultipleFileUpload fileUpload1 = tm.uploadDirectory("bucket", "prefix", file, true); + } + + void resume(TransferManager tm, PersistableDownload persistableDownload, PersistableUpload persistableUpload) { + Download download = tm.resumeDownload(persistableDownload); + Upload upload = tm.resumeUpload(persistableUpload); + } + + void POJO_methods(PersistableTransfer transfer, OutputStream outputStream, TransferProgress progress) throws IOException { + String s = transfer.serialize(); + transfer.serialize(outputStream); + + long bytesTransferred = 
progress.getBytesTransferred(); + } +} \ No newline at end of file diff --git a/third-party/pom.xml b/third-party/pom.xml index af90d40a15df..ce9e42290959 100644 --- a/third-party/pom.xml +++ b/third-party/pom.xml @@ -21,7 +21,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT third-party diff --git a/third-party/third-party-jackson-core/pom.xml b/third-party/third-party-jackson-core/pom.xml index 9988ef3072de..835929bb3530 100644 --- a/third-party/third-party-jackson-core/pom.xml +++ b/third-party/third-party-jackson-core/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-jackson-dataformat-cbor/pom.xml b/third-party/third-party-jackson-dataformat-cbor/pom.xml index cbc77cf840c4..3a61893332d2 100644 --- a/third-party/third-party-jackson-dataformat-cbor/pom.xml +++ b/third-party/third-party-jackson-dataformat-cbor/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/third-party/third-party-slf4j-api/pom.xml b/third-party/third-party-slf4j-api/pom.xml index b55ebfd53bc5..dbe655d7b7ef 100644 --- a/third-party/third-party-slf4j-api/pom.xml +++ b/third-party/third-party-slf4j-api/pom.xml @@ -20,7 +20,7 @@ third-party software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 diff --git a/utils/pom.xml b/utils/pom.xml index 23bce848125b..328b208b9af1 100644 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -20,7 +20,7 @@ aws-sdk-java-pom software.amazon.awssdk - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT 4.0.0 @@ -130,6 +130,11 @@ rxjava test + + nl.jqno.equalsverifier + equalsverifier + test + diff --git a/utils/src/main/java/software/amazon/awssdk/utils/cache/bounded/BoundedCache.java b/utils/src/main/java/software/amazon/awssdk/utils/cache/bounded/BoundedCache.java new file mode 100644 index 000000000000..37b22594c4df --- /dev/null +++ 
b/utils/src/main/java/software/amazon/awssdk/utils/cache/bounded/BoundedCache.java @@ -0,0 +1,126 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.cache.bounded; + +import java.util.Iterator; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.annotations.ThreadSafe; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.Validate; + +/** + * A thread-safe cache implementation that returns the value for a specified key, + * retrieving it by either getting the stored value from the cache or using a supplied function to calculate that value + * and add it to the cache. + *

        + * When the cache is full, a new value will push out an unspecified value. + *

        + * The user can configure the maximum size of the cache, which is set to a default of 100. + *

        + * Null values are not cached. + */ +@SdkProtectedApi +@ThreadSafe +public final class BoundedCache { + + private static final Logger log = Logger.loggerFor(BoundedCache.class); + + private static final int DEFAULT_SIZE = 100; + + private final ConcurrentHashMap cache; + private final Function valueSupplier; + private final int maxCacheSize; + private final Object cacheLock; + + private BoundedCache(Builder builder) { + this.valueSupplier = builder.supplier; + this.maxCacheSize = builder.maxSize != null ? + Validate.isPositive(builder.maxSize, "maxSize") + : DEFAULT_SIZE; + this.cache = new ConcurrentHashMap<>(); + this.cacheLock = new Object(); + } + + /** + * Get a value based on the key. If the value exists in the cache, it's returned. + * Otherwise, the value is calculated based on the supplied function {@link Builder#builder(Function)}. + */ + public V get(K key) { + V value = cache.get(key); + if (value != null) { + return value; + } + + V newValue = valueSupplier.apply(key); + if (newValue == null) { + return null; + } + + synchronized (cacheLock) { + value = cache.get(key); + if (value != null) { + return value; + } + + if (cache.size() >= maxCacheSize) { + cleanup(); + } + + cache.put(key, newValue); + return newValue; + } + } + + /** + * Clean up the cache by removing an unspecified entry + */ + private void cleanup() { + Iterator iterator = cache.keySet().iterator(); + if (iterator.hasNext()) { + K key = iterator.next(); + cache.remove(key); + } + } + + public int size() { + return cache.size(); + } + + public static BoundedCache.Builder builder(Function supplier) { + return new Builder<>(supplier); + } + + public static final class Builder { + + private final Function supplier; + private Integer maxSize; + + private Builder(Function supplier) { + this.supplier = supplier; + } + + public Builder maxSize(Integer maxSize) { + this.maxSize = maxSize; + return this; + } + + public BoundedCache build() { + return new BoundedCache<>(this); + } + } +} 
\ No newline at end of file diff --git a/utils/src/main/java/software/amazon/awssdk/utils/cache/lru/LruCache.java b/utils/src/main/java/software/amazon/awssdk/utils/cache/lru/LruCache.java index df7bc222d261..e5bc23dca833 100644 --- a/utils/src/main/java/software/amazon/awssdk/utils/cache/lru/LruCache.java +++ b/utils/src/main/java/software/amazon/awssdk/utils/cache/lru/LruCache.java @@ -168,6 +168,10 @@ public int size() { return cache.size(); } + public boolean containsKey(K key) { + return cache.containsKey(key); + } + public static LruCache.Builder builder(Function supplier) { return new Builder<>(supplier); } diff --git a/utils/src/main/java/software/amazon/awssdk/utils/uri/SdkUri.java b/utils/src/main/java/software/amazon/awssdk/utils/uri/SdkUri.java new file mode 100644 index 000000000000..5093f4d10239 --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/uri/SdkUri.java @@ -0,0 +1,277 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.uri; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Objects; +import software.amazon.awssdk.annotations.SdkProtectedApi; +import software.amazon.awssdk.utils.Lazy; +import software.amazon.awssdk.utils.Logger; +import software.amazon.awssdk.utils.cache.bounded.BoundedCache; +import software.amazon.awssdk.utils.uri.internal.UriConstructorArgs; + +/** + * Global cache for account-id based URI. 
Prevent calling new URI constructor for the same string, which can cause performance + * issues with some uri pattern. Do not directly depend on this class, it will be removed in the future. + */ +@SdkProtectedApi +public final class SdkUri { + private static final Logger log = Logger.loggerFor(SdkUri.class); + + private static final String HTTPS_PREFIX = "https://"; + private static final String HTTP_PREFIX = "http://"; + private static final int MAX_INT_DIGITS_BASE_10 = 10; + + /* + * The default BoundedCache size is 100, but for a single service call we cache at least 3 different URIs so the cache size is + * increased a bit to account for the different URIs. + */ + private static final int CACHE_SIZE = 150; + + private static final Lazy INSTANCE = new Lazy<>(SdkUri::new); + + private final BoundedCache cache; + + private SdkUri() { + this.cache = BoundedCache.builder(UriConstructorArgs::newInstance) + .maxSize(CACHE_SIZE) + .build(); + } + + public static SdkUri getInstance() { + return INSTANCE.getValue(); + } + + public URI create(String s) { + if (!isAccountIdUri(s)) { + log.trace(() -> "skipping cache for uri " + s); + return URI.create(s); + } + StringConstructorArgs key = new StringConstructorArgs(s); + URI uri = cache.get(key); + return uri; + } + + public URI newUri(String s) throws URISyntaxException { + if (!isAccountIdUri(s)) { + log.trace(() -> "skipping cache for uri " + s); + return new URI(s); + } + try { + StringConstructorArgs key = new StringConstructorArgs(s); + URI uri = cache.get(key); + return uri; + } catch (IllegalArgumentException e) { + // URI.create() wraps the URISyntaxException thrown by new URI in a IllegalArgumentException, we need to unwrap it + if (e.getCause() instanceof URISyntaxException) { + throw (URISyntaxException) e.getCause(); + } + throw e; + } + } + + public URI newUri(String scheme, + String userInfo, String host, int port, + String path, String query, String fragment) throws URISyntaxException { + if 
(!isAccountIdUri(host)) { + log.trace(() -> "skipping cache for host " + host); + return new URI(scheme, userInfo, host, port, path, query, fragment); + } + try { + HostConstructorArgs key = new HostConstructorArgs(scheme, userInfo, host, port, path, query, fragment); + URI uri = cache.get(key); + return uri; + } catch (IllegalArgumentException e) { + if (e.getCause() instanceof URISyntaxException) { + throw (URISyntaxException) e.getCause(); + } + throw e; + } + } + + public URI newUri(String scheme, + String authority, + String path, String query, String fragment) throws URISyntaxException { + if (!isAccountIdUri(authority)) { + log.trace(() -> "skipping cache for authority " + authority); + return new URI(scheme, authority, path, query, fragment); + } + try { + AuthorityConstructorArgs key = new AuthorityConstructorArgs(scheme, authority, path, query, fragment); + URI uri = cache.get(key); + return uri; + } catch (IllegalArgumentException e) { + if (e.getCause() instanceof URISyntaxException) { + throw (URISyntaxException) e.getCause(); + } + throw e; + } + } + + /* + * Best-effort check for uri string being account-id based. + * + * The troublesome uris are of the form 'https://123456789012.ddb.us-east-1.amazonaws.com' The heuristic chosen to detect such + * candidate URI is to check the first char after the scheme, and then the char 10 places further down the string. If both + * are digits, there is a potential for that string to represent a number that would exceed the value of Integer.MAX_VALUE, + * which would cause the performance degradation observed with such URIs. 
+ */ + private boolean isAccountIdUri(String s) { + int firstCharAfterScheme = 0; + if (s.startsWith(HTTPS_PREFIX)) { + firstCharAfterScheme = HTTPS_PREFIX.length(); + } else if (s.startsWith(HTTP_PREFIX)) { + firstCharAfterScheme = HTTP_PREFIX.length(); + } + + if (s.length() > firstCharAfterScheme + MAX_INT_DIGITS_BASE_10) { + return Character.isDigit(s.charAt(firstCharAfterScheme)) + && Character.isDigit(s.charAt(firstCharAfterScheme + MAX_INT_DIGITS_BASE_10)); + } + return false; + } + + private static final class StringConstructorArgs implements UriConstructorArgs { + private final String str; + + private StringConstructorArgs(String str) { + this.str = str; + } + + @Override + public URI newInstance() { + return URI.create(str); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + + StringConstructorArgs that = (StringConstructorArgs) o; + return Objects.equals(str, that.str); + } + + @Override + public int hashCode() { + return Objects.hashCode(str); + } + } + + private static final class HostConstructorArgs implements UriConstructorArgs { + private final String scheme; + private final String userInfo; + private final String host; + private final int port; + private final String path; + private final String query; + private final String fragment; + + private HostConstructorArgs(String scheme, + String userInfo, String host, int port, + String path, String query, String fragment) { + this.scheme = scheme; + this.userInfo = userInfo; + this.host = host; + this.port = port; + this.path = path; + this.query = query; + this.fragment = fragment; + } + + @Override + public URI newInstance() { + try { + return new URI(scheme, userInfo, host, port, path, query, fragment); + } catch (URISyntaxException x) { + throw new IllegalArgumentException(x.getMessage(), x); + } + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + + 
HostConstructorArgs that = (HostConstructorArgs) o; + return port == that.port && Objects.equals(scheme, that.scheme) && Objects.equals(userInfo, that.userInfo) + && Objects.equals(host, that.host) && Objects.equals(path, that.path) && Objects.equals(query, that.query) + && Objects.equals(fragment, that.fragment); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(scheme); + result = 31 * result + Objects.hashCode(userInfo); + result = 31 * result + Objects.hashCode(host); + result = 31 * result + port; + result = 31 * result + Objects.hashCode(path); + result = 31 * result + Objects.hashCode(query); + result = 31 * result + Objects.hashCode(fragment); + return result; + } + } + + private static final class AuthorityConstructorArgs implements UriConstructorArgs { + private final String scheme; + private final String authority; + private final String path; + private final String query; + private final String fragment; + + private AuthorityConstructorArgs(String scheme, String authority, String path, String query, String fragment) { + this.scheme = scheme; + this.authority = authority; + this.path = path; + this.query = query; + this.fragment = fragment; + } + + @Override + public URI newInstance() { + try { + return new URI(scheme, authority, path, query, fragment); + } catch (URISyntaxException x) { + throw new IllegalArgumentException(x.getMessage(), x); + } + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + + AuthorityConstructorArgs that = (AuthorityConstructorArgs) o; + return Objects.equals(scheme, that.scheme) && Objects.equals(authority, that.authority) + && Objects.equals(path, that.path) && Objects.equals(query, that.query) + && Objects.equals(fragment, that.fragment); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(scheme); + result = 31 * result + Objects.hashCode(authority); + result = 31 * result + Objects.hashCode(path); + 
result = 31 * result + Objects.hashCode(query); + result = 31 * result + Objects.hashCode(fragment); + return result; + } + } +} diff --git a/utils/src/main/java/software/amazon/awssdk/utils/uri/internal/UriConstructorArgs.java b/utils/src/main/java/software/amazon/awssdk/utils/uri/internal/UriConstructorArgs.java new file mode 100644 index 000000000000..86251e30d42d --- /dev/null +++ b/utils/src/main/java/software/amazon/awssdk/utils/uri/internal/UriConstructorArgs.java @@ -0,0 +1,38 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils.uri.internal; + +import java.net.URI; +import software.amazon.awssdk.annotations.SdkInternalApi; + +/** + * Represent the different constructor to the URI class used by the SDK. Implementation of this interface are able to create new + * URIs based on the different arguments passed to classes to them. + * + * @see URI#create(String) + * @see URI#URI(String, String, String, String, String) + * @see URI#URI(String, String, String, int, String, String, String) + */ +@SdkInternalApi +public interface UriConstructorArgs { + + /** + * Creates a new instance of the URI. Can return a new instance everytime it is called. 
+ * + * @return a new URI instance + */ + URI newInstance(); +} diff --git a/utils/src/test/java/software/amazon/awssdk/utils/SdkUriTest.java b/utils/src/test/java/software/amazon/awssdk/utils/SdkUriTest.java new file mode 100644 index 000000000000..f738883b3f27 --- /dev/null +++ b/utils/src/test/java/software/amazon/awssdk/utils/SdkUriTest.java @@ -0,0 +1,305 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.utils; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assertions.*; + +import java.lang.reflect.Field; +import java.net.URI; +import java.net.URISyntaxException; +import nl.jqno.equalsverifier.EqualsVerifier; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.junit.platform.commons.util.ReflectionUtils; +import org.opentest4j.AssertionFailedError; +import software.amazon.awssdk.utils.cache.bounded.BoundedCache; +import software.amazon.awssdk.utils.uri.SdkUri; +import software.amazon.awssdk.utils.uri.internal.UriConstructorArgs; + +class SdkUriTest { + + @AfterEach + void resetCache() throws IllegalAccessException { + Field cacheField = getCacheField(); + cacheField.setAccessible(true); + cacheField.set(SdkUri.getInstance(), 
BoundedCache.builder(UriConstructorArgs::newInstance) + .maxSize(100) + .build()); + } + + @ParameterizedTest + @ValueSource(strings = {"https://123456789012.ddb.us-east-1.amazonaws.com", + "http://123456789012.ddb.us-east-1.amazonaws.com"}) + void multipleCreate_simpleURI_SameStringConstructor_ShouldCacheOnlyOnce(String strURI) { + URI uri = SdkUri.getInstance().create(strURI); + String scheme = strURI.startsWith("https") ? "https" : "http"; + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasScheme(scheme) + .hasNoParameters() + .hasNoPort() + .hasNoQuery(); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().create(strURI); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleCreate_FullUri_SameConstructor_ShouldCacheOnlyOne(String scheme) { + String strURI = scheme + "://123456789012.ddb.us-east-1.amazonaws.com:322/some/path?foo=bar#test"; + URI uri = SdkUri.getInstance().create(strURI); + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasScheme(scheme) + .hasNoUserInfo() + .hasPort(322) + .hasPath("/some/path") + .hasQuery("foo=bar") + .hasFragment("test"); + + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().create(strURI); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + + } + + @Test + void multipleCreate_withDifferentStringConstructor_shouldCacheOnlyOnce() { + String[] strURIs = { + "https://123456789012.ddb.us-east-1.amazonaws.com", + "https://123456789013.ddb.us-east-1.amazonaws.com", + "https://123456789014.ddb.us-east-1.amazonaws.com", + "https://123456789015.ddb.us-east-1.amazonaws.com", + "https://123456789016.ddb.us-east-1.amazonaws.com", + "https://123456789017.ddb.us-east-1.amazonaws.com", + "https://123456789018.ddb.us-east-1.amazonaws.com", + "https://123456789019.ddb.us-east-1.amazonaws.com", + }; 
+ for (String uri : strURIs) { + URI u = SdkUri.getInstance().create(uri); + } + assertThat(getCache().size()).isEqualTo(8); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUriWithNulls_SameAuthorityConstructor_ShouldCacheOnlyOnce(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, strURI, null, null, null); + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasScheme(scheme) + .hasNoParameters() + .hasNoPort() + .hasNoQuery(); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, strURI, null, null, null); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUri_SameAuthorityConstructor_ShouldCacheOnlyOnce(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, strURI, "/somePath/to/resource", "foo=bar", "test"); + assertThat(uri).hasHost(strURI) + .hasPath("/somePath/to/resource") + .hasQuery("foo=bar") + .hasFragment("test") + .hasScheme(scheme); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, strURI, "/somePath/to/resource", "foo=bar", "test"); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUri_DifferentAuthorityConstructor_ShouldCacheAll(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, strURI, "/somePath/to/resource", "foo=bar", "test"); + assertThat(uri).hasHost(strURI) + .hasPath("/somePath/to/resource") + .hasQuery("foo=bar") + .hasFragment("test") + .hasScheme(scheme); + 
assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, strURI, "/some/otherPath/to/resource", null, "test2"); + assertThat(getCache().size()).isEqualTo(2); + assertThat(uri).isNotSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUriWithNulls_SameHostConstructor_ShouldCacheOnlyOnce(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, null, strURI, 322, null, null, null); + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasNoParameters() + .hasPort(322) + .hasNoQuery(); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, null, strURI, 322, null, null, null); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUri_SameHostConstructor_ShouldCacheOnlyOnce(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, "user1", strURI, 322, "/some/path", "foo=bar", "test"); + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasScheme(scheme) + .hasUserInfo("user1") + .hasPort(322) + .hasPath("/some/path") + .hasQuery("foo=bar") + .hasFragment("test"); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, "user1", strURI, 322, "/some/path", "foo=bar", "test"); + assertThat(getCache().size()).isEqualTo(1); + assertThat(uri).isSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"http", "https"}) + void multipleNewUri_DifferentHostConstructor_ShouldCacheOnlyOnce(String scheme) throws URISyntaxException { + String strURI = "123456789012.ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().newUri(scheme, "user1", strURI, 322, 
"/some/path", "foo=bar", "test"); + assertThat(uri).hasHost("123456789012.ddb.us-east-1.amazonaws.com") + .hasScheme(scheme) + .hasUserInfo("user1") + .hasPort(322) + .hasPath("/some/path") + .hasQuery("foo=bar") + .hasFragment("test"); + assertThat(getCache().size()).isEqualTo(1); + URI uri2 = SdkUri.getInstance().newUri(scheme, "user1", strURI, 322, "/some/other/path", "foo=bar", "test2"); + assertThat(getCache().size()).isEqualTo(2); + assertThat(uri).isNotSameAs(uri2); + } + + @Test + void notCached_shouldCreateNewInstance() { + String strURI = "https://ddb.us-east-1.amazonaws.com"; + URI uri = SdkUri.getInstance().create(strURI); + assertThat(uri).hasHost("ddb.us-east-1.amazonaws.com") + .hasNoParameters() + .hasNoPort() + .hasNoQuery(); + assertThat(getCache().size()).isEqualTo(0); + URI uri2 = SdkUri.getInstance().create(strURI); + assertThat(getCache().size()).isEqualTo(0); + assertThat(uri).isNotSameAs(uri2); + } + + @ParameterizedTest + @ValueSource(strings = {"potatoes tomatoes", "123412341234 potatoes tomatoes"}) + void malformedURI_shouldThrowsSameExceptionAsUriClass(String malformedUri) { + + assertThatThrownBy(() -> SdkUri.getInstance().create(malformedUri)) + .as("Malformed uri should throw IllegalArgumentException using the create method") + .isInstanceOf(IllegalArgumentException.class); + assertThat(getCache().size()).as("Cache should be empty if create URI fails") + .isEqualTo(0); + + assertThatThrownBy(() -> SdkUri.getInstance().newUri(malformedUri)) + .as("Malformed uri should throw URISyntaxException using the newURI method") + .isInstanceOf(URISyntaxException.class); + assertThat(getCache().size()).as("Cache should be empty if create URI fails") + .isEqualTo(0); + + assertThatThrownBy(() -> SdkUri.getInstance().newUri("scheme", malformedUri, "path", "query", "fragment")) + .as("Malformed uri should throw URISyntaxException using the newURI with authority method") + .isInstanceOf(URISyntaxException.class); + 
assertThat(getCache().size()).as("Cache should be empty if create URI fails") + .isEqualTo(0); + + assertThatThrownBy(() -> new URI("scheme", malformedUri, "path", "query", "fragment")) + .as("CONSTRUCTOR") + .isInstanceOf(URISyntaxException.class); + assertThat(getCache().size()).as("Cache should be empty if create URI fails") + .isEqualTo(0); + + + assertThatThrownBy(() -> SdkUri.getInstance().newUri("scheme", "userInfo", malformedUri, + 444, "path", "query", "fragment")) + .as("Malformed uri should throw URISyntaxException using the newURI with host method") + .isInstanceOf(URISyntaxException.class); + assertThat(getCache().size()).as("Cache should be empty if create URI fails") + .isEqualTo(0); + } + + @ParameterizedTest + @ValueSource(strings = { + "http://123456789.ddb.com", + "https://123456789.ddb.com", + "123456789.ddb.com", + "http://123.ddb.com", + "https://123.ddb.com", + "123.ddb.com", + "http://123z.ddb.com", + "https://123z.ddb.com", + "123z.ddb.com", + "http://1", + "https://1", + "1", + "http://z", + "https://z", + "z" + }) + void shouldNotCache_whenLeadingDigitsDoNotExceedIntegerMaxValue(String strURI) { + URI uri = SdkUri.getInstance().create(strURI); + assertThat(getCache().size()).isEqualTo(0); + URI uri2 = SdkUri.getInstance().create(strURI); + assertThat(getCache().size()).isEqualTo(0); + assertThat(uri).isNotSameAs(uri2); + } + + + private BoundedCache getCache() { + Field field = getCacheField(); + field.setAccessible(true); + try { + return (BoundedCache) field.get(SdkUri.getInstance()); + } catch (IllegalAccessException e) { + fail(e); + return null; + } + } + + private Field getCacheField() { + return ReflectionUtils.streamFields(SdkUri.class, + f -> "cache".equals(f.getName()), + ReflectionUtils.HierarchyTraversalMode.TOP_DOWN) + .findFirst() + .orElseThrow(() -> new AssertionFailedError("Unexpected error - Could not find field " + + "'cache' in " + SdkUri.class.getName())); + } + + @Test + void equals_hashCode() { + 
EqualsVerifier.forPackage("software.amazon.awssdk.utils.uri") + .except(SdkUri.class) + .verify(); + } +} diff --git a/v2-migration/pom.xml b/v2-migration/pom.xml index b2e6cf01358a..e0d54629465e 100644 --- a/v2-migration/pom.xml +++ b/v2-migration/pom.xml @@ -21,7 +21,7 @@ software.amazon.awssdk aws-sdk-java-pom - 2.31.40-SNAPSHOT + 2.31.76-SNAPSHOT ../pom.xml diff --git a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3AddImportsAndComments.java b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3AddImportsAndComments.java index ff8a55c2597d..6f14bbeadb77 100644 --- a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3AddImportsAndComments.java +++ b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3AddImportsAndComments.java @@ -58,7 +58,6 @@ public class S3AddImportsAndComments extends Recipe { private static final MethodMatcher GET_EXPIRY_TIME = v1EnMethodMatcher("S3EventNotification.RestoreEventDataEntity " + "getLifecycleRestorationExpiryTime(..)"); - private static final Pattern CANNED_ACL = Pattern.compile(V1_S3_MODEL_PKG + "CannedAccessControlList"); private static final Pattern GET_OBJECT_REQUEST = Pattern.compile(V1_S3_MODEL_PKG + "GetObjectRequest"); private static final Pattern CREATE_BUCKET_REQUEST = Pattern.compile(V1_S3_MODEL_PKG + "CreateBucketRequest"); diff --git a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3TmAddComments.java b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3TmAddComments.java new file mode 100644 index 000000000000..5a1b0b527c8e --- /dev/null +++ b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/S3TmAddComments.java @@ -0,0 +1,122 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.awssdk.v2migration; + +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V1_TM_PKG; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_S3_CLIENT; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_S3_MODEL_PKG; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_TM_CLIENT; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.createComments; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.v2TmMethodMatcher; + +import java.util.regex.Pattern; +import org.openrewrite.ExecutionContext; +import org.openrewrite.Recipe; +import org.openrewrite.TreeVisitor; +import org.openrewrite.java.JavaIsoVisitor; +import org.openrewrite.java.MethodMatcher; +import org.openrewrite.java.tree.J; +import org.openrewrite.java.tree.JavaType; +import software.amazon.awssdk.annotations.SdkInternalApi; + +@SdkInternalApi +public class S3TmAddComments extends Recipe { + + private static final Pattern S3_TM = Pattern.compile(V2_TM_CLIENT); + private static final Pattern S3_CLIENT = Pattern.compile(V2_S3_CLIENT); + + private static final MethodMatcher COPY = v2TmMethodMatcher("copy(..)"); + private static final MethodMatcher DOWNLOAD = v2TmMethodMatcher(String.format("download(%sGetObjectRequest, java.io.File, " + + "%sinternal.S3ProgressListener, ..)", + V2_S3_MODEL_PKG, V1_TM_PKG)); + private static final MethodMatcher DOWNLOAD_DIRECTORY = v2TmMethodMatcher("downloadDirectory(..)"); + private static final 
MethodMatcher UPLOAD = v2TmMethodMatcher("upload(..)"); + private static final MethodMatcher UPLOAD_DIRECTORY = v2TmMethodMatcher("uploadDirectory(..)"); + + @Override + public String getDisplayName() { + return "Add imports and comments to unsupported S3 transfer manager transforms."; + } + + @Override + public String getDescription() { + return "Add imports and comments to unsupported S3 transfer manager transforms."; + } + + @Override + public TreeVisitor getVisitor() { + return new S3TmAddComments.Visitor(); + } + + private static class Visitor extends JavaIsoVisitor { + + @Override + public J.MethodInvocation visitMethodInvocation(J.MethodInvocation method, ExecutionContext ctx) { + if (COPY.matches(method) && (method.getArguments().size() == 2 || method.getArguments().size() == 3)) { + String comment = "Migration for TransferStateChangeListener is not supported by the migration tool. Please " + + "manually migrate the code using TransferListener in v2"; + return method.withComments(createComments(comment)); + } + if (DOWNLOAD.matches(method)) { + String comment = "Migration for S3ProgressListener is not supported by the migration tool. Please manually " + + "migrate the code using TransferListener in v2"; + return method.withComments(createComments(comment)); + } + if (DOWNLOAD_DIRECTORY.matches(method) && method.getArguments().size() > 3) { + String comment = "Migration for KeyFilter is not supported by the migration tool. 
Please " + + "manually migrate the code using DownloadFilter in v2"; + return method.withComments(createComments(comment)); + } + if (UPLOAD.matches(method) && method.getArguments().size() == 4) { + String comment = "Migration for InputStream and ObjectMetadata as argument for upload is not supported by the " + + "migration tool."; + return method.withComments(createComments(comment)); + } + if (UPLOAD.matches(method) && method.getArguments().size() == 2) { + String comment = "Migration for S3ProgressListener is not supported by the migration tool. Please manually " + + "migrate the code using TransferListener in v2"; + return method.withComments(createComments(comment)); + } + if (UPLOAD_DIRECTORY.matches(method) && method.getArguments().size() > 4) { + String comment = "Migration for ObjectMetadataProvider as argument for uploadDirectory is not supported by the " + + "migration tool."; + return method.withComments(createComments(comment)); + } + + return method; + } + + @Override + public J.NewClass visitNewClass(J.NewClass newClass, ExecutionContext ctx) { + JavaType type = newClass.getType(); + if (!(type instanceof JavaType.FullyQualified)) { + return newClass; + } + + if (type.isAssignableFrom(S3_TM) && + !newClass.getArguments().isEmpty() && + newClass.getArguments().get(0).getType() != null) { + if (newClass.getArguments().get(0).getType().isAssignableFrom(S3_CLIENT)) { + String comment = "S3TransferManager requires S3AsyncClient in v2. 
Please create a new S3AsyncClient " + + "instance for v2 S3TransferManager."; + return newClass.withComments(createComments(comment)); + } + } + + return newClass; + } + } +} diff --git a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java index c2c12ff9ed5d..749d94d1ed8a 100644 --- a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java +++ b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/TransferManagerMethodsToV2.java @@ -16,17 +16,21 @@ package software.amazon.awssdk.v2migration; import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_S3_MODEL_PKG; +import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_TM_CLIENT; import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.V2_TM_MODEL_PKG; import static software.amazon.awssdk.v2migration.internal.utils.S3TransformUtils.v2TmMethodMatcher; +import java.util.regex.Pattern; import org.openrewrite.ExecutionContext; import org.openrewrite.Recipe; import org.openrewrite.TreeVisitor; import org.openrewrite.java.AddImport; -import org.openrewrite.java.JavaIsoVisitor; import org.openrewrite.java.JavaTemplate; +import org.openrewrite.java.JavaVisitor; import org.openrewrite.java.MethodMatcher; +import org.openrewrite.java.tree.Expression; import org.openrewrite.java.tree.J; +import org.openrewrite.java.tree.JavaType; import software.amazon.awssdk.annotations.SdkInternalApi; @SdkInternalApi @@ -47,6 +51,15 @@ public class TransferManagerMethodsToV2 extends Recipe { private static final MethodMatcher COPY_BUCKET_KEY = v2TmMethodMatcher("copy(String, String, String, String"); + private static final MethodMatcher DOWNLOAD_DIR = v2TmMethodMatcher("downloadDirectory(String, String, java.io.File)"); + + private static final MethodMatcher UPLOAD_DIR = 
v2TmMethodMatcher("uploadDirectory(String, String, java.io.File, boolean)"); + + private static final Pattern S3_TM_CREDENTIAL = Pattern.compile(V2_TM_CLIENT); + private static final Pattern V2_AWSCREDENTIAL = Pattern.compile("software.amazon.awssdk.auth.credentials.AwsCredentials"); + private static final Pattern V2_CREDENTIAL_PROVIDER = Pattern.compile("software.amazon.awssdk.auth.credentials" + + ".AwsCredentialsProvider"); + @Override public String getDisplayName() { return "Transfer Manager Methods to V2"; @@ -62,10 +75,10 @@ public TreeVisitor getVisitor() { return new Visitor(); } - private static final class Visitor extends JavaIsoVisitor { + private static final class Visitor extends JavaVisitor { @Override - public J.MethodInvocation visitMethodInvocation(J.MethodInvocation method, ExecutionContext executionContext) { + public J visitMethodInvocation(J.MethodInvocation method, ExecutionContext executionContext) { if (DOWNLOAD_BUCKET_KEY_FILE.matches(method, false)) { method = transformDownloadWithBucketKeyFile(method); @@ -95,10 +108,87 @@ public J.MethodInvocation visitMethodInvocation(J.MethodInvocation method, Execu method = transformUploadWithBucketKeyFile(method); return super.visitMethodInvocation(method, executionContext); } + if (DOWNLOAD_DIR.matches(method, false)) { + method = transformDownloadDirectory(method); + return super.visitMethodInvocation(method, executionContext); + } + if (UPLOAD_DIR.matches(method, false)) { + method = transformUploadDirectory(method); + return super.visitMethodInvocation(method, executionContext); + } return super.visitMethodInvocation(method, executionContext); } + @Override + public J visitNewClass(J.NewClass newClass, ExecutionContext executionContext) { + JavaType type = newClass.getType(); + if (!(type instanceof JavaType.FullyQualified)) { + return newClass; + } + + if (type.isAssignableFrom(S3_TM_CREDENTIAL) && + newClass.getArguments().size() == 1 && + newClass.getArguments().get(0).getType() != null) { + 
Expression arg = newClass.getArguments().get(0); + if (arg.getType().isAssignableFrom(V2_AWSCREDENTIAL)) { + addS3AsyncClientImport(); + addStaticCredentialsProviderImport(); + + return JavaTemplate + .builder("S3TransferManager.builder()" + + ".s3Client(S3AsyncClient.builder()" + + ".credentialsProvider(StaticCredentialsProvider.create(#{any()}))" + + ".build())" + + ".build()") + .build() + .apply(getCursor(), newClass.getCoordinates().replace(), arg); + } + if (arg.getType().isAssignableFrom(V2_CREDENTIAL_PROVIDER)) { + addS3AsyncClientImport(); + + return JavaTemplate + .builder("S3TransferManager.builder()" + + ".s3Client(S3AsyncClient.builder()" + + ".credentialsProvider(#{any()})" + + ".build())" + + ".build()") + .build() + .apply(getCursor(), newClass.getCoordinates().replace(), arg); + } + } + + return super.visitNewClass(newClass, executionContext); + } + + private J.MethodInvocation transformDownloadDirectory(J.MethodInvocation method) { + String v2Method = "#{any()}.downloadDirectory(DownloadDirectoryRequest.builder()" + + ".bucket(#{any()}).listObjectsV2RequestTransformer(builder -> builder.prefix(#{any()}))" + + ".destination(#{any()}.toPath()).build())"; + + method = JavaTemplate.builder(v2Method).build() + .apply(getCursor(), method.getCoordinates().replace(), method.getSelect(), + method.getArguments().get(0), method.getArguments().get(1), + method.getArguments().get(2)); + + addTmImport("DownloadDirectoryRequest"); + return method; + } + + private J.MethodInvocation transformUploadDirectory(J.MethodInvocation method) { + String v2Method = "#{any()}.uploadDirectory(UploadDirectoryRequest.builder()" + + ".bucket(#{any()}).s3Prefix(#{any()}).source(#{any()}.toPath())" + + ".maxDepth(#{any()} ? 
Integer.MAX_VALUE : 1).build())"; + + method = JavaTemplate.builder(v2Method).build() + .apply(getCursor(), method.getCoordinates().replace(), method.getSelect(), + method.getArguments().get(0), method.getArguments().get(1), + method.getArguments().get(2), method.getArguments().get(3)); + + addTmImport("UploadDirectoryRequest"); + return method; + } + private J.MethodInvocation transformUploadWithBucketKeyFile(J.MethodInvocation method) { String v2Method = "#{any()}.uploadFile(UploadFileRequest.builder()" + ".putObjectRequest(PutObjectRequest.builder().bucket(#{any()}).key(#{any()}).build())" @@ -220,5 +310,13 @@ private void addDurationImport() { private void addRequestOverrideConfigImport() { doAfterVisit(new AddImport<>("software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration", null, false)); } + + private void addS3AsyncClientImport() { + doAfterVisit(new AddImport<>("software.amazon.awssdk.services.s3.S3AsyncClient", null, false)); + } + + private void addStaticCredentialsProviderImport() { + doAfterVisit(new AddImport<>("software.amazon.awssdk.auth.credentials.StaticCredentialsProvider", null, false)); + } } } diff --git a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/internal/utils/S3TransformUtils.java b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/internal/utils/S3TransformUtils.java index 2a4895952edc..87981c4f8bca 100644 --- a/v2-migration/src/main/java/software/amazon/awssdk/v2migration/internal/utils/S3TransformUtils.java +++ b/v2-migration/src/main/java/software/amazon/awssdk/v2migration/internal/utils/S3TransformUtils.java @@ -38,6 +38,7 @@ public final class S3TransformUtils { public static final String V1_S3_MODEL_PKG = "com.amazonaws.services.s3.model."; public static final String V1_S3_PKG = "com.amazonaws.services.s3."; public static final String V1_EN_PKG = "com.amazonaws.services.s3.event."; + public static final String V1_TM_PKG = "com.amazonaws.services.s3.transfer."; public static final String 
V2_S3_CLIENT = "software.amazon.awssdk.services.s3.S3Client"; public static final String V2_S3_MODEL_PKG = "software.amazon.awssdk.services.s3.model."; diff --git a/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml b/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml index 1409d4f853c0..9769969f3748 100644 --- a/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml +++ b/v2-migration/src/main/resources/META-INF/rewrite/aws-sdk-java-v1-to-v2-with-tm.yml @@ -53,4 +53,6 @@ recipeList: - software.amazon.awssdk.v2migration.S3NonStreamingRequestToV2Complex - software.amazon.awssdk.v2migration.S3PutObjectRequestToV2 - software.amazon.awssdk.v2migration.SettersToBuilderV2 + - software.amazon.awssdk.v2migration.S3TmAddComments + - software.amazon.awssdk.v2migration.ChangeTransferManagerSimpleMethods - software.amazon.awssdk.v2migration.TransferManagerMethodsToV2 \ No newline at end of file diff --git a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml new file mode 100644 index 000000000000..8fbc8dee0856 --- /dev/null +++ b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-simple-methods.yml @@ -0,0 +1,46 @@ +# +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+--- +type: specs.openrewrite.org/v1beta/recipe +name: software.amazon.awssdk.v2migration.ChangeTransferManagerSimpleMethods +displayName: Change TransferManager simple methods to v2. +description: Change TransferManager simple methods to v2. +recipeList: + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager resumeDownload(..) + newMethodName: resumeDownloadFile + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager resumeUpload(..) + newMethodName: resumeUploadFile + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.S3TransferManager shutdownNow() + newMethodName: close + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.model.Transfer getProgress() + newMethodName: progress + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.model.ResumableTransfer serialize() + newMethodName: serializeToString + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.model.ResumableTransfer serialize(java.io.OutputStream) + newMethodName: serializeToOutputStream + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.progress.TransferProgress getBytesTransferred() + newMethodName: snapshot().transferredBytes + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.progress.TransferProgress getTotalBytesToTransfer() + newMethodName: snapshot().totalBytes + - org.openrewrite.java.ChangeMethodName: + methodPattern: software.amazon.awssdk.transfer.s3.progress.TransferProgress getPercentTransferred() + newMethodName: snapshot().ratioTransferred \ No newline at end of file diff --git a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml 
b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml index e546ecff46d6..159f7b123262 100644 --- a/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml +++ b/v2-migration/src/main/resources/META-INF/rewrite/change-transfer-manager-types.yml @@ -18,6 +18,12 @@ name: software.amazon.awssdk.v2migration.ChangeTransferManagerTypes displayName: Change SDK TransferManager types from v1 to v2 description: Change SDK TransferManager types from v1 to v2. recipeList: + - software.amazon.awssdk.v2migration.openrewrite.ChangeMethodInvocationReturnType: + methodPattern: com.amazonaws.services.s3.transfer.TransferManager resumeDownload(..) + newReturnType: software.amazon.awssdk.transfer.s3.model.FileDownload + - software.amazon.awssdk.v2migration.openrewrite.ChangeMethodInvocationReturnType: + methodPattern: com.amazonaws.services.s3.transfer.TransferManager resumeUpload(..) + newReturnType: software.amazon.awssdk.transfer.s3.model.FileUpload - org.openrewrite.java.ChangeType: oldFullyQualifiedTypeName: com.amazonaws.services.s3.transfer.TransferManager newFullyQualifiedTypeName: software.amazon.awssdk.transfer.s3.S3TransferManager