diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ed4b5566b2..81c9b4d012e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,20 @@ +Release v1.49.19 (2024-01-11) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. +* `service/ecs`: Updates service API and documentation + * This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. +* `service/eventbridge`: Updates service API and documentation +* `service/iot`: Updates service API and documentation + * Add ConflictException to Update APIs of AWS IoT Software Package Catalog +* `service/iotfleetwise`: Updates service API +* `service/secretsmanager`: Updates service documentation and examples + * Doc only update for Secrets Manager +* `service/workspaces`: Updates service documentation + * Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update + Release v1.49.18 (2024-01-10) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 05a5189b9e0..3330dd4342e 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -43698,6 +43698,19 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "health": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index e6dbda5167f..0daf567205b 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.49.18" +const SDKVersion = "1.49.19" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 964ceb776d0..c4528b885a5 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -20723,6 +20723,14 @@ "VolumeId":{ "shape":"String", "locationName":"volumeId" + }, + "AssociatedResource":{ + "shape":"String", + "locationName":"associatedResource" + }, + "VolumeOwnerId":{ + "shape":"String", + "locationName":"volumeOwnerId" } } }, @@ -45615,6 +45623,14 @@ "DeleteOnTermination":{ "shape":"Boolean", "locationName":"deleteOnTermination" + }, + "AssociatedResource":{ + "shape":"String", + "locationName":"associatedResource" + }, + "InstanceOwningService":{ + "shape":"String", + "locationName":"instanceOwningService" } } }, diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index f0ae52c74b1..8c37fad3c2f 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -374,7 +374,7 @@ "DetachInternetGateway": "
Detaches an internet gateway from a VPC, disabling connectivity between the internet and the VPC. The VPC must not contain any running instances with Elastic IP addresses or public IPv4 addresses.
", "DetachNetworkInterface": "Detaches a network interface from an instance.
", "DetachVerifiedAccessTrustProvider": "Detaches the specified Amazon Web Services Verified Access trust provider from the specified Amazon Web Services Verified Access instance.
", - "DetachVolume": "Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy
state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.
When a volume with an Amazon Web Services Marketplace product code is detached from an instance, the product code is no longer associated with the instance.
For more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.
", + "DetachVolume": "Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy
state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.
When a volume with an Amazon Web Services Marketplace product code is detached from an instance, the product code is no longer associated with the instance.
You can't detach or force detach volumes that are attached to Amazon ECS or Fargate tasks. Attempting to do this results in the UnsupportedOperationException
exception with the Unable to detach volume attached to ECS tasks
error message.
For more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.
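As a hedged illustration of the DetachVolume behavior described above, the following Go sketch calls the v1 SDK's EC2 client; the region and volume ID are placeholders, and unmounting the file system inside the guest OS is assumed to have already been done.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumes credentials come from the environment or shared config.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Unmount the file system inside the instance before calling DetachVolume,
	// otherwise the attachment can hang in the "busy" state.
	out, err := svc.DetachVolume(&ec2.DetachVolumeInput{
		VolumeId: aws.String("vol-0123456789abcdef0"), // placeholder volume ID
		// Force: aws.Bool(true), // last resort only; not supported for volumes attached to ECS or Fargate tasks
	})
	if err != nil {
		log.Fatalf("DetachVolume failed: %v", err)
	}
	fmt.Println("attachment state:", aws.StringValue(out.State))
}
```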
", "DetachVpnGateway": "Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).
You must wait for the attachment's state to switch to detached
before you can delete the VPC or attach a different VPC to the virtual private gateway.
Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.
", "DisableAwsNetworkPerformanceMetricSubscription": "Disables Infrastructure Performance metric subscriptions.
", @@ -2810,7 +2810,7 @@ "ScheduledInstancesEbs$Encrypted": "Indicates whether the volume is encrypted. You can attached encrypted volumes only to instances that support them.
", "ScheduledInstancesLaunchSpecification$EbsOptimized": "Indicates whether the instances are optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.
Default: false
Indicates whether monitoring is enabled.
", - "ScheduledInstancesNetworkInterface$AssociatePublicIpAddress": "Indicates whether to assign a public IPv4 address to instances launched in a VPC. The public IPv4 address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true
.
Indicates whether to assign a public IPv4 address to instances launched in a VPC. The public IPv4 address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true
.
Starting on February 1, 2024, Amazon Web Services will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.
", "ScheduledInstancesNetworkInterface$DeleteOnTermination": "Indicates whether to delete the interface when the instance is terminated.
", "ScheduledInstancesPrivateIpAddressConfig$Primary": "Indicates whether this is a primary IPv4 address. Otherwise, this is a secondary IPv4 address.
", "SearchLocalGatewayRoutesRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The default TotalTargetCapacity
, which is either Spot
or On-Demand
.
The default TotalTargetCapacity
, which is either Spot
or On-Demand
.
The default target capacity type.
", + "TargetCapacitySpecificationRequest$DefaultTargetCapacityType": "The default target capacity type.
" } }, "DefaultingDhcpOptionsId": { @@ -6127,7 +6127,7 @@ "DescribeCapacityBlockOfferingsMaxResults": { "base": null, "refs": { - "DescribeCapacityBlockOfferingsRequest$MaxResults": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" } }, "DescribeCapacityBlockOfferingsRequest": { @@ -6143,7 +6143,7 @@ "DescribeCapacityReservationFleetsMaxResults": { "base": null, "refs": { - "DescribeCapacityReservationFleetsRequest$MaxResults": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" } }, "DescribeCapacityReservationFleetsRequest": { @@ -6159,7 +6159,7 @@ "DescribeCapacityReservationsMaxResults": { "base": null, "refs": { - "DescribeCapacityReservationsRequest$MaxResults": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" } }, "DescribeCapacityReservationsRequest": { @@ -9969,7 +9969,7 @@ "GetCapacityReservationUsageRequestMaxResults": { "base": null, "refs": { - "GetCapacityReservationUsageRequest$MaxResults": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
Valid range: Minimum value of 1. Maximum value of 1000.
" + "GetCapacityReservationUsageRequest$MaxResults": "The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" } }, "GetCapacityReservationUsageResult": { @@ -10055,7 +10055,7 @@ "GetGroupsForCapacityReservationRequestMaxResults": { "base": null, "refs": { - "GetGroupsForCapacityReservationRequest$MaxResults": "The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.
" } }, "GetGroupsForCapacityReservationResult": { @@ -10689,9 +10689,9 @@ "HttpTokensState": { "base": null, "refs": { - "InstanceMetadataOptionsRequest$HttpTokens": "IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional
(in other words, set the use of IMDSv2 to optional
) or required
(in other words, set the use of IMDSv2 to required
).
optional
- When IMDSv2 is optional, you can choose to retrieve instance metadata with or without a session token in your request. If you retrieve the IAM role credentials without a token, the IMDSv1 role credentials are returned. If you retrieve the IAM role credentials using a valid session token, the IMDSv2 role credentials are returned.
required
- When IMDSv2 is required, you must send a session token with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: optional
IMDSv2 uses token-backed sessions. Indicates whether the use of HTTP tokens is optional
(in other words, indicates whether the use of IMDSv2 is optional
) or required
(in other words, indicates whether the use of IMDSv2 is required
).
optional
- When IMDSv2 is optional, you can choose to retrieve instance metadata with or without a session token in your request. If you retrieve the IAM role credentials without a token, the IMDSv1 role credentials are returned. If you retrieve the IAM role credentials using a valid session token, the IMDSv2 role credentials are returned.
required
- When IMDSv2 is required, you must send a session token with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: optional
IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional
(in other words, set the use of IMDSv2 to optional
) or required
(in other words, set the use of IMDSv2 to required
).
optional
- When IMDSv2 is optional, you can choose to retrieve instance metadata with or without a session token in your request. If you retrieve the IAM role credentials without a token, the IMDSv1 role credentials are returned. If you retrieve the IAM role credentials using a valid session token, the IMDSv2 role credentials are returned.
required
- When IMDSv2 is required, you must send a session token with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: optional
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, the default is required
.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Indicates whether IMDSv2 is required.
optional
- IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials.
required
- IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available.
Default: If the value of ImdsSupport
for the Amazon Machine Image (AMI) for your instance is v2.0
, the default is required
.
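To make the optional/required distinction concrete, here is a hedged Go sketch that launches an instance with IMDSv2 required; the AMI ID and instance type are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI ID
		InstanceType: aws.String("t3.micro"),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
		MetadataOptions: &ec2.InstanceMetadataOptionsRequest{
			HttpTokens:   aws.String("required"), // IMDSv2 only; IMDSv1 credentials are not served
			HttpEndpoint: aws.String("enabled"),
		},
	})
	if err != nil {
		log.Fatalf("RunInstances failed: %v", err)
	}
	fmt.Println("launched:", aws.StringValue(out.Instances[0].InstanceId))
}
```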
Modifies the DeleteOnTermination
attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination
, the default is true
and the volume is deleted when the instance is terminated.
To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide.
" + "ModifyInstanceAttributeRequest$BlockDeviceMappings": "Modifies the DeleteOnTermination
attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination
, the default is true
and the volume is deleted when the instance is terminated. You can't modify the DeleteOnTermination
attribute for volumes that are attached to Fargate tasks.
To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide.
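A minimal sketch of flipping DeleteOnTermination on an attached volume, assuming the device name matches an existing entry in the instance's block device mapping (IDs are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder instance ID
		BlockDeviceMappings: []*ec2.InstanceBlockDeviceMappingSpecification{
			{
				DeviceName: aws.String("/dev/sdf"), // must match an existing mapping
				Ebs: &ec2.EbsInstanceBlockDeviceSpecification{
					// Keep the volume around after the instance terminates.
					DeleteOnTermination: aws.Bool(false),
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("ModifyInstanceAttribute failed: %v", err)
	}
}
```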
" } }, "InstanceBootModeValues": { @@ -12301,10 +12301,10 @@ "StoreImageTaskResult$ProgressPercentage": "The progress of the task as a percentage.
", "Subnet$AvailableIpAddressCount": "The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any stopped instances are considered unavailable.
", "Subnet$EnableLniAtDeviceIndex": " Indicates the device position for local network interfaces in this subnet. For example, 1
indicates local network interfaces in this subnet are the secondary network interface (eth1).
The number of units to request, filled using DefaultTargetCapacityType
.
The number of units to request, filled the default target capacity type.
", "TargetCapacitySpecification$OnDemandTargetCapacity": "The number of On-Demand units to request. If you specify a target capacity for Spot units, you cannot specify a target capacity for On-Demand units.
", "TargetCapacitySpecification$SpotTargetCapacity": "The maximum number of Spot units to launch. If you specify a target capacity for On-Demand units, you cannot specify a target capacity for Spot units.
", - "TargetCapacitySpecificationRequest$TotalTargetCapacity": "The number of units to request, filled using DefaultTargetCapacityType
.
The number of units to request, filled using the default target capacity type.
", "TargetCapacitySpecificationRequest$OnDemandTargetCapacity": "The number of On-Demand units to request.
", "TargetCapacitySpecificationRequest$SpotTargetCapacity": "The number of Spot units to request.
", "TargetConfiguration$InstanceCount": "The number of instances the Convertible Reserved Instance offering can be applied to. This parameter is reserved and cannot be specified in a request
", @@ -18560,7 +18560,7 @@ "base": null, "refs": { "DisableSnapshotBlockPublicAccessResult$State": "Returns unblocked
if the request succeeds.
The mode in which to enable block public access for snapshots for the Region. Specify one of the following values:
block-all-sharing
- Prevents all public sharing of snapshots in the Region. Users in the account will no longer be able to request new public sharing. Additionally, snapshots that are already publicly shared are treated as private and they are no longer publicly available.
If you enable block public access for snapshots in block-all-sharing
mode, it does not change the permissions for snapshots that are already publicly shared. Instead, it prevents these snapshots from be publicly visible and publicly accessible. Therefore, the attributes for these snapshots still indicate that they are publicly shared, even though they are not publicly available.
block-new-sharing
- Prevents only new public sharing of snapshots in the Region. Users in the account will no longer be able to request new public sharing. However, snapshots that are already publicly shared, remain publicly available.
The mode in which to enable block public access for snapshots for the Region. Specify one of the following values:
block-all-sharing
- Prevents all public sharing of snapshots in the Region. Users in the account will no longer be able to request new public sharing. Additionally, snapshots that are already publicly shared are treated as private and they are no longer publicly available.
If you enable block public access for snapshots in block-all-sharing
mode, it does not change the permissions for snapshots that are already publicly shared. Instead, it prevents these snapshots from be publicly visible and publicly accessible. Therefore, the attributes for these snapshots still indicate that they are publicly shared, even though they are not publicly available.
block-new-sharing
- Prevents only new public sharing of snapshots in the Region. Users in the account will no longer be able to request new public sharing. However, snapshots that are already publicly shared, remain publicly available.
unblocked
is not a valid value for EnableSnapshotBlockPublicAccess.
The state of block public access for snapshots for the account and Region. Returns either block-all-sharing
or block-new-sharing
if the request succeeds.
The current state of block public access for snapshots. Possible values include:
block-all-sharing
- All public sharing of snapshots is blocked. Users in the account can't request new public sharing. Additionally, snapshots that were already publicly shared are treated as private and are not publicly available.
block-new-sharing
- Only new public sharing of snapshots is blocked. Users in the account can't request new public sharing. However, snapshots that were already publicly shared, remain publicly available.
unblocked
- Public sharing is not blocked. Users can publicly share snapshots.
Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted.
This parameter is only supported on BlockDeviceMapping
objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.
The ARN of the Outpost on which the snapshot is stored.
This parameter is not supported when using CreateImage.
", "EbsInstanceBlockDevice$VolumeId": "The ID of the EBS volume.
", + "EbsInstanceBlockDevice$AssociatedResource": "The ARN of the Amazon ECS or Fargate task to which the volume is attached.
", + "EbsInstanceBlockDevice$VolumeOwnerId": "The ID of the Amazon Web Services account that owns the volume.
This parameter is returned only for volumes that are attached to Fargate tasks.
", "Ec2InstanceConnectEndpoint$OwnerId": "The ID of the Amazon Web Services account that created the EC2 Instance Connect Endpoint.
", "Ec2InstanceConnectEndpoint$StateMessage": "The message for the current state of the EC2 Instance Connect Endpoint. Can include a failure message.
", "Ec2InstanceConnectEndpoint$DnsName": "The DNS name of the EC2 Instance Connect Endpoint.
", @@ -20846,7 +20848,7 @@ "StartNetworkInsightsAccessScopeAnalysisRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.
", "StartNetworkInsightsAnalysisRequest$ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.
", "StateReason$Code": "The reason code for the state change.
", - "StateReason$Message": "The message for the state change.
Server.InsufficientInstanceCapacity
: There was insufficient capacity available to satisfy the launch request.
Server.InternalError
: An internal error caused the instance to terminate during launch.
Server.ScheduledStop
: The instance was stopped due to a scheduled retirement.
Server.SpotInstanceShutdown
: The instance was stopped because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
Server.SpotInstanceTermination
: The instance was terminated because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
Client.InstanceInitiatedShutdown
: The instance was shut down using the shutdown -h
command from the instance.
Client.InstanceTerminated
: The instance was terminated or rebooted during AMI creation.
Client.InternalError
: A client error caused the instance to terminate during launch.
Client.InvalidSnapshot.NotFound
: The specified snapshot was not found.
Client.UserInitiatedHibernate
: Hibernation was initiated on the instance.
Client.UserInitiatedShutdown
: The instance was shut down using the Amazon EC2 API.
Client.VolumeLimitExceeded
: The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your account limits.
The message for the state change.
Server.InsufficientInstanceCapacity
: There was insufficient capacity available to satisfy the launch request.
Server.InternalError
: An internal error caused the instance to terminate during launch.
Server.ScheduledStop
: The instance was stopped due to a scheduled retirement.
Server.SpotInstanceShutdown
: The instance was stopped because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
Server.SpotInstanceTermination
: The instance was terminated because the number of Spot requests with a maximum price equal to or higher than the Spot price exceeded available capacity or because of an increase in the Spot price.
Client.InstanceInitiatedShutdown
: The instance was shut down from the operating system of the instance.
Client.InstanceTerminated
: The instance was terminated or rebooted during AMI creation.
Client.InternalError
: A client error caused the instance to terminate during launch.
Client.InvalidSnapshot.NotFound
: The specified snapshot was not found.
Client.UserInitiatedHibernate
: Hibernation was initiated on the instance.
Client.UserInitiatedShutdown
: The instance was shut down using the Amazon EC2 API.
Client.VolumeLimitExceeded
: The limit on the number of EBS volumes or total storage was exceeded. Decrease usage or request an increase in your account limits.
The name of the S3 bucket.
", "StorageLocation$Key": "The key.
", "StoreImageTaskResult$AmiId": "The ID of the AMI that is being stored.
", @@ -21077,9 +21079,11 @@ "Volume$OutpostArn": "The Amazon Resource Name (ARN) of the Outpost.
", "Volume$SnapshotId": "The snapshot from which the volume was created, if applicable.
", "Volume$VolumeId": "The ID of the volume.
", - "VolumeAttachment$Device": "The device name.
", - "VolumeAttachment$InstanceId": "The ID of the instance.
", + "VolumeAttachment$Device": "The device name.
If the volume is attached to a Fargate task, this parameter returns null
.
The ID of the instance.
If the volume is attached to a Fargate task, this parameter returns null
.
The ID of the volume.
", + "VolumeAttachment$AssociatedResource": "The ARN of the Amazon ECS or Fargate task to which the volume is attached.
", + "VolumeAttachment$InstanceOwningService": "The service principal of Amazon Web Services service that owns the underlying instance to which the volume is attached.
This parameter is returned only for volumes that are attached to Fargate tasks.
", "VolumeModification$VolumeId": "The ID of the volume.
", "VolumeModification$StatusMessage": "A status message about the modification progress or failure.
", "VolumeStatusAction$Code": "The code identifying the operation, for example, enable-volume-io
.
The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain
, you can specify a target capacity of 0 and add capacity later.
You can use the On-Demand Instance MaxTotalPrice
parameter, the Spot Instance MaxTotalPrice
parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice
parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.
The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain
, you can specify a target capacity of 0 and add capacity later.
You can use the On-Demand Instance MaxTotalPrice
parameter, the Spot Instance MaxTotalPrice
parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn't met the target capacity. The MaxTotalPrice
parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.
The number of units to request.
", "ModifyFleetRequest$TargetCapacitySpecification": "The size of the EC2 Fleet.
" @@ -21632,10 +21636,10 @@ "TargetCapacityUnitType": { "base": null, "refs": { - "GetSpotPlacementScoresRequest$TargetCapacityUnitType": "The unit for the target capacity.
Default: units
(translates to number of instances)
The unit for the target capacity. TargetCapacityUnitType
can only be specified when InstanceRequirements
is specified.
Default: units
(translates to number of instances)
The unit for the target capacity. TargetCapacityUnitType
can only be specified when InstanceRequirements
is specified.
Default: units
(translates to number of instances)
The unit for the target capacity. TargetCapacityUnitType
can only be specified when InstanceRequirements
is specified.
Default: units
(translates to number of instances)
The unit for the target capacity.
", + "SpotFleetRequestConfigData$TargetCapacityUnitType": "The unit for the target capacity. You can specify this parameter only when using attribute-based instance type selection.
Default: units
(the number of instances)
The unit for the target capacity.
", + "TargetCapacitySpecificationRequest$TargetCapacityUnitType": "The unit for the target capacity. You can specify this parameter only when using attributed-based instance type selection.
Default: units
(the number of instances)
Creates a new capacity provider. Capacity providers are associated with an Amazon ECS cluster and are used in capacity provider strategies to facilitate cluster auto scaling.
Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on Fargate use the FARGATE
and FARGATE_SPOT
capacity providers. These providers are available to all accounts in the Amazon Web Services Regions that Fargate supports.
Creates a new Amazon ECS cluster. By default, your account receives a default
cluster when you launch your first container instance. However, you can create your own cluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is so that it can manage required resources in other Amazon Web Services services on your behalf. However, if the user that makes the call doesn't have permissions to create the service-linked role, it isn't created. For more information, see Using service-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
", + "CreateService": "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount
, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
There are two service scheduler strategies available:
REPLICA
- The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
DAEMON
- The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent
is 100%. The default value for a daemon service for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING
state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING
state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING
state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING
or PENDING
state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING
state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING
state. This is while the container instances are in the DRAINING
state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.
", "CreateTaskSet": "Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
You can create a maximum of 5 tasks sets for a deployment.
", "DeleteAccountSetting": "Disables an account setting for a specified user, role, or the root user for an account.
", "DeleteAttributes": "Deletes one or more custom attributes from an Amazon ECS resource.
", @@ -41,8 +41,8 @@ "PutClusterCapacityProviders": "Modifies the available capacity providers and the default capacity provider strategy for a cluster.
You must specify both the available capacity providers and a default capacity provider strategy for the cluster. If the specified cluster has existing capacity providers associated with it, you must specify all existing capacity providers in addition to any new ones you want to add. Any existing capacity providers that are associated with a cluster that are omitted from a PutClusterCapacityProviders API call will be disassociated with the cluster. You can only disassociate an existing capacity provider from a cluster if it's not being used by any existing tasks.
When creating a service or running a task on a cluster, if no capacity provider or launch type is specified, then the cluster's default capacity provider strategy is used. We recommend that you define a default capacity provider strategy for your cluster. However, you must specify an empty array ([]
) to bypass defining a default strategy.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Registers an EC2 instance into the specified cluster. This instance becomes available to place containers on.
", "RegisterTaskDefinition": "Registers a new task definition from the supplied family
and containerDefinitions
. Optionally, you can add data volumes to your containers with the volumes
parameter. For more information about task definition parameters and defaults, see Amazon ECS Task Definitions in the Amazon Elastic Container Service Developer Guide.
You can specify a role for your task with the taskRoleArn
parameter. When you specify a role for a task, its containers can then use the latest versions of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in the policy that's associated with the role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition with the networkMode
parameter. The available network modes correspond to those described in Network settings in the Docker run reference. If you specify the awsvpc
network mode, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide.
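A hedged sketch of registering a small awsvpc task definition; the family name and container image are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ecs.New(sess)

	out, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
		Family:                  aws.String("web-task"), // placeholder family name
		NetworkMode:             aws.String("awsvpc"),
		RequiresCompatibilities: []*string{aws.String("FARGATE")},
		Cpu:                     aws.String("256"),
		Memory:                  aws.String("512"),
		ContainerDefinitions: []*ecs.ContainerDefinition{{
			Name:      aws.String("web"),
			Image:     aws.String("public.ecr.aws/nginx/nginx:latest"), // placeholder image
			Essential: aws.Bool(true),
			PortMappings: []*ecs.PortMapping{
				{ContainerPort: aws.Int64(80), Protocol: aws.String("tcp")},
			},
		}},
	})
	if err != nil {
		log.Fatalf("RegisterTaskDefinition failed: %v", err)
	}
	fmt.Println("registered:", aws.StringValue(out.TaskDefinition.TaskDefinitionArn))
}
```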
Starts a new task using the specified task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
Starts a new task from the specified task definition on the specified container instance or instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
", + "RunTask": "Starts a new task using the specified task definition.
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.
To manage eventual consistency, you can do the following:
Confirm the state of the resource before you run a command to modify it. Run the DescribeTasks command using an exponential backoff algorithm to ensure that you allow enough time for the previous command to propagate through the system. To do this, run the DescribeTasks command repeatedly, starting with a couple of seconds of wait time and increasing gradually up to five minutes of wait time.
Add wait time between subsequent commands, even if the DescribeTasks command returns an accurate response. Apply an exponential backoff algorithm starting with a couple of seconds of wait time, and increase gradually up to about five minutes of wait time.
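A hedged sketch of RunTask that also wires up the EBS volume attachment introduced in this release. The cluster, task definition, subnet, and role ARN are placeholders, and the VolumeConfigurations/TaskManagedEBSVolumeConfiguration field names are assumed from the updated ECS API model, so verify them against the generated ecs package in this SDK version before relying on this.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ecs.New(sess)

	_, err := svc.RunTask(&ecs.RunTaskInput{
		Cluster:        aws.String("my-cluster"), // placeholder
		TaskDefinition: aws.String("web-task:1"), // placeholder
		Count:          aws.Int64(1),
		LaunchType:     aws.String("FARGATE"),
		NetworkConfiguration: &ecs.NetworkConfiguration{
			AwsvpcConfiguration: &ecs.AwsVpcConfiguration{
				Subnets: []*string{aws.String("subnet-0123456789abcdef0")}, // placeholder
			},
		},
		// Assumed shape of the new per-task EBS volume configuration; names may
		// differ slightly in the generated SDK code.
		VolumeConfigurations: []*ecs.TaskVolumeConfiguration{{
			Name: aws.String("data"), // must match a volume name declared in the task definition
			ManagedEBSVolume: &ecs.TaskManagedEBSVolumeConfiguration{
				RoleArn:        aws.String("arn:aws:iam::123456789012:role/ecsInfrastructureRole"), // placeholder
				SizeInGiB:      aws.Int64(20),
				VolumeType:     aws.String("gp3"),
				FilesystemType: aws.String("ext4"),
			},
		}},
	})
	if err != nil {
		log.Fatalf("RunTask failed: %v", err)
	}
}
```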
Starts a new task from the specified task definition on the specified container instance or instances.
Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
", "StopTask": "Stops a running task. Any tags associated with the task will be deleted.
When StopTask is called on a task, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
value and a default 30-second timeout, after which the SIGKILL
value is sent and the containers are forcibly stopped. If the container handles the SIGTERM
value gracefully and exits within 30 seconds from receiving it, no SIGKILL
value is sent.
The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Sent to acknowledge that an attachment changed states.
", "SubmitContainerStateChange": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a container changed states.
", @@ -54,7 +54,7 @@ "UpdateClusterSettings": "Modifies the settings to use for a cluster.
", "UpdateContainerAgent": "Updates the Amazon ECS container agent on a specified container instance. Updating the Amazon ECS container agent doesn't interrupt running tasks or services on the container instance. The process for updating the agent differs depending on whether your container instance was launched with the Amazon ECS-optimized AMI or another operating system.
The UpdateContainerAgent
API isn't supported for container instances using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent, you can update the ecs-init
package. This updates the agent. For more information, see Updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent
API operation do not apply to Windows container instances. We recommend that you launch new container instances to update the agent version in your Windows clusters.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon Linux AMI with the ecs-init
service installed and running. For help updating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Modifies the status of an Amazon ECS container instance.
Once a container instance has reached an ACTIVE
state, you can change the status of a container instance to DRAINING
to manually remove an instance from a cluster, for example to perform system updates, update the Docker daemon, or scale down the cluster size.
A container instance can't be changed to DRAINING
until it has reached an ACTIVE
status. If the instance is in any other status, an error will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks from being scheduled for placement on the container instance and replacement service tasks are started on other container instances in the cluster if the resources are available. Service tasks on the container instance that are in the PENDING
state are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are stopped and replaced according to the service's deployment configuration parameters, minimumHealthyPercent
and maximumPercent
. You can change the deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during task replacement. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. If the minimum is 100%, the service scheduler can't remove existing tasks until the replacement tasks are considered healthy. Tasks for services that do not use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during task replacement. You can use this to define the replacement batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four tasks to be drained, provided that the cluster resources required to do this are available. If the maximum is 100%, then replacement tasks can't start until the draining tasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service aren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
tasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to ACTIVE
status and once it has reached that status the Amazon ECS scheduler can begin scheduling tasks on the instance again.
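The draining workflow above can be driven programmatically. The sketch below, assuming the standard aws-sdk-go v1 ECS client with placeholder cluster and instance identifiers, sets an instance to DRAINING and then checks for remaining RUNNING tasks with ListTasks.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// drainInstance sets the container instance to DRAINING so the service
// scheduler stops PENDING tasks and replaces RUNNING service tasks elsewhere.
// It returns how many RUNNING tasks remain on the instance.
func drainInstance(svc *ecs.ECS, cluster, instanceARN string) (int, error) {
	_, err := svc.UpdateContainerInstancesState(&ecs.UpdateContainerInstancesStateInput{
		Cluster:            aws.String(cluster),
		ContainerInstances: []*string{aws.String(instanceARN)},
		Status:             aws.String("DRAINING"),
	})
	if err != nil {
		return 0, err
	}

	// Draining is complete when the instance has no more RUNNING tasks.
	out, err := svc.ListTasks(&ecs.ListTasksInput{
		Cluster:           aws.String(cluster),
		ContainerInstance: aws.String(instanceARN),
		DesiredStatus:     aws.String("RUNNING"),
	})
	if err != nil {
		return 0, err
	}
	return len(out.TaskArns), nil
}
```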
Modifies the parameters of a service.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
Modifies the parameters of a service.
For services using the rolling update (ECS
) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations
is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations
null
, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set. For more information, see CreateTaskSet.
You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.
If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest
), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment
option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.
You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent
and maximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore desiredCount
temporarily during a deployment. For example, if desiredCount
is four tasks, a minimum of 50% allows the scheduler to stop two existing tasks before starting two new tasks. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING
state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING
state and are reported as healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the number of running tasks during a deployment. You can use it to define the deployment batch size. For example, if desiredCount
is four tasks, a maximum of 200% starts four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent of docker stop
is issued to the containers running in the task. This results in a SIGTERM
and a 30-second timeout. After this, SIGKILL
is sent and the containers are forcibly stopped. If the container handles the SIGTERM
gracefully and exits within 30 seconds from receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.
Determine which of the container instances in your cluster can support your service's task definition. For example, they have the required CPU, memory, ports, and container instance attributes.
By default, the service scheduler attempts to balance tasks across Availability Zones in this manner even though you can choose a different placement strategy.
Sort the valid container instances by the fewest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have zero, valid container instances in either zone B or C are considered optimal for placement.
Place the new service task on a valid container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the fewest number of running tasks for this service.
When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:
Sort the container instances by the largest number of running tasks for this service in the same Availability Zone as the instance. For example, if zone A has one running service task and zones B and C each have two, container instances in either zone B or C are considered optimal for termination.
Stop the task on a container instance in an optimal Availability Zone (based on the previous steps), favoring container instances with the largest number of running tasks for this service.
You must have a service-linked role when you update any of the following service properties:
loadBalancers
,
serviceRegistries
For more information about the role see the CreateService
request parameter role
.
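A short sketch of the force-new-deployment flow described above, assuming the aws-sdk-go v1 ECS client; the names are placeholders, and the new volumeConfigurations member added in this release could be set on the same request to attach Amazon EBS volumes.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// redeployLatest forces a new deployment so tasks re-pull my_image:latest
// without registering a new task definition revision.
func redeployLatest(svc *ecs.ECS, cluster, service string) error {
	_, err := svc.UpdateService(&ecs.UpdateServiceInput{
		Cluster:            aws.String(cluster),
		Service:            aws.String(service),
		DesiredCount:       aws.Int64(4), // optional: adjust the task count in the same call
		ForceNewDeployment: aws.Bool(true),
	})
	return err
}
```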
Modifies which task set in a service is the primary task set. Any parameters that are updated on the primary task set in a service will transition to the service. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Updates the protection status of a task. You can set protectionEnabled
to true
to protect your task from termination during scale-in events from Service Autoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS clears the protectionEnabled
property making the task eligible for termination by a subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to 2,880 minutes (48 hours). To specify the custom expiration period, set the expiresInMinutes
property. The expiresInMinutes
property is always reset when you invoke this operation for a task that already has protectionEnabled
set to true
. You can keep extending the protection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in protection in the Amazon Elastic Container Service Developer Guide .
This operation is only supported for tasks belonging to an Amazon ECS service. Invoking this operation for a standalone task will result in an TASK_NOT_VALID
failure. For more information, see API failure reasons.
If you prefer to set task protection from within the container, we recommend using the Task scale-in protection endpoint.
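The protection window above can be set with a call like the following sketch (aws-sdk-go v1 ECS client; the cluster and task ARN are placeholders).

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// protectTask shields a service task from scale-in events and deployments
// for 60 minutes; omitting ExpiresInMinutes uses the 120-minute default.
func protectTask(svc *ecs.ECS, cluster, taskARN string) error {
	_, err := svc.UpdateTaskProtection(&ecs.UpdateTaskProtectionInput{
		Cluster:           aws.String(cluster),
		Tasks:             []*string{aws.String(taskARN)},
		ProtectionEnabled: aws.Bool(true),
		ExpiresInMinutes:  aws.Int64(60),
	})
	return err
}
```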
Modifies a task set. This is used when a service uses the EXTERNAL
deployment controller type. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.
" + "Attachment$details": "Details of the attachment.
For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.
For Service Connect services, this includes portName
, clientAliases
, discoveryName
, and ingressPortOverride
.
For elastic block storage, this includes roleArn
, encrypted
, filesystemType
, iops
, kmsKeyId
, sizeInGiB
, snapshotId
, tagSpecifications
, throughput
, and volumeType
.
If this value is true
, the Docker volume is created if it doesn't already exist.
This field is only used if the scope
is shared
.
Run an init
process inside the container that forwards signals and reaps processes. This parameter maps to the --init
option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
If this value is true
, the container has read-only access to the volume. If this value is false
, then the container can write to the volume. The default value is false
.
Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted
parameter of the CreateVolume API in the Amazon EC2 API Reference.
Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted
parameter of the CreateVolume API in the Amazon EC2 API Reference.
Indicates whether the volume should be deleted when the task stops. If a value of true
is specified,
Amazon ECS deletes the Amazon EBS volume on your behalf when the task goes into the STOPPED
state. If no value is specified, the
default value of true
is used. When set to false
, Amazon ECS leaves the volume in your
account.
If true
, this enables execute command functionality on all task containers.
If you do not want to override the value that was set when the service was created, you can set this to null
when performing this action.
Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.
Only tasks launched after the update will reflect the update. To update the tags on all tasks, set forceNewDeployment
to true
, so that Amazon ECS starts new tasks with the updated tags.
Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS volumes for standalone tasks or tasks created as part of a service. Each task definition revision may only have one volume configured at launch in the volume configuration.
To configure a volume at launch time, use this task definition revision and specify a volumeConfigurations
object when calling the CreateService
, UpdateService
, RunTask
or StartTask
APIs.
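To make the configuredAtLaunch flow concrete, here is a sketch of running a task with a launch-time EBS volume. The VolumeConfigurations, TaskVolumeConfiguration, and TaskManagedEBSVolumeConfiguration names are inferred from the model shapes introduced in this diff and may differ slightly in the generated Go client; the cluster, task definition, and role ARN are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// runTaskWithEBSVolume runs a task whose task definition declares a volume
// with configuredAtLaunch, supplying the EBS settings at launch time.
// Field names are inferred from the API model and may need adjustment.
func runTaskWithEBSVolume(svc *ecs.ECS, cluster, taskDef string) error {
	_, err := svc.RunTask(&ecs.RunTaskInput{
		Cluster:        aws.String(cluster),
		TaskDefinition: aws.String(taskDef),
		Count:          aws.Int64(1),
		VolumeConfigurations: []*ecs.TaskVolumeConfiguration{{
			Name: aws.String("data"), // must match the volume name in the task definition
			ManagedEBSVolume: &ecs.TaskManagedEBSVolumeConfiguration{
				SizeInGiB:  aws.Int64(20),
				VolumeType: aws.String("gp3"),
				RoleArn:    aws.String("arn:aws:iam::123456789012:role/ecsInfrastructureRole"), // placeholder
			},
		}},
	})
	return err
}
```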
If this value is true
, the container has read-only access to the volume. If this value is false
, then the container can write to the volume. The default value is false
.
The port number on the container instance to reserve for your container.
If you specify a containerPortRange
, leave this field empty and the value of the hostPort
is set as follows:
For containers in a task with the awsvpc
network mode, the hostPort
is set to the same value as the containerPort
. This is a static mapping strategy.
For containers in a task with the bridge
network mode, the Amazon ECS agent finds open ports on the host and automatically binds them to the container ports. This is a dynamic mapping strategy.
If you use containers in a task with the awsvpc
or host
network mode, the hostPort
can either be left blank or set to the same value as the containerPort
.
If you use containers in a task with the bridge
network mode, you can specify a non-reserved host port for your container port mapping, or you can omit the hostPort
(or set it to 0
) while specifying a containerPort
and your container automatically receives a port in the ephemeral port range for your container instance operating system and Docker version.
The default ephemeral port range for Docker version 1.6.0 and later is listed on the instance under /proc/sys/net/ipv4/ip_local_port_range
. If this kernel parameter is unavailable, the default ephemeral port range from 49153 through 65535 (Linux) or 49152 through 65535 (Windows) is used. Do not attempt to specify a host port in the ephemeral port range as these are reserved for automatic assignment. In general, ports below 32768 are outside of the ephemeral port range.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in a running task is also reserved while the task is running. That is, after a task stops, the host port is released. The current reserved ports are displayed in the remainingResources
of DescribeContainerInstances output. A container instance can have up to 100 reserved ports at a time. This number includes the default reserved ports. Automatically assigned ports aren't included in the 100 reserved ports quota.
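As a small illustration of the dynamic mapping described above for the bridge network mode, the following sketch declares a port mapping that lets the agent assign the host port from the instance's ephemeral range.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// bridgeModePortMapping shows a dynamic host port assignment: with the bridge
// network mode, a hostPort of 0 (or an omitted hostPort) tells the agent to
// bind the container port to a free port from the ephemeral range.
var bridgeModePortMapping = &ecs.PortMapping{
	ContainerPort: aws.Int64(8080),
	HostPort:      aws.Int64(0),
	Protocol:      aws.String("tcp"),
}
```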
The number of instantiations of the specified task to place on your cluster. You can specify up to 10 tasks for each call.
", "Service$healthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started.
", + "ServiceManagedEBSVolumeConfiguration$sizeInGiB": "The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the Size
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The following are the supported volume size values for each volume type.
gp2
and gp3
: 1-16,384
io1
and io2
: 4-16,384
st1
and sc1
: 125-16,384
standard
: 1-1,024
The number of I/O operations per second (IOPS). For gp3
, io1
, and io2
volumes, this represents the number of IOPS that are provisioned for the volume. For gp2
volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
The following are the supported values for each volume type.
gp3
: 3,000 - 16,000 IOPS
io1
: 100 - 64,000 IOPS
io2
: 100 - 256,000 IOPS
This parameter is required for io1
and io2
volume types. The default for gp3
volumes is 3,000 IOPS
. This parameter is not supported for st1
, sc1
, or standard
volume types.
This parameter maps 1:1 with the Iops
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the Throughput
parameter of the CreateVolume API in the Amazon EC2 API Reference.
This parameter is only supported for the gp3
volume type.
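Tying the size, IOPS, and throughput settings together, a gp3 volume configuration for a service might look like the sketch below. The Go struct and field names (ServiceManagedEBSVolumeConfiguration, SizeInGiB, Iops, Throughput, Encrypted, RoleArn) are inferred from the model shapes in this diff and should be treated as assumptions; the role ARN is a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// gp3VolumeConfig sketches a service-managed EBS volume: 100 GiB gp3 with the
// default 3,000 IOPS raised to 4,000 and 250 MiB/s of provisioned throughput.
var gp3VolumeConfig = &ecs.ServiceManagedEBSVolumeConfiguration{
	SizeInGiB:  aws.Int64(100),
	VolumeType: aws.String("gp3"),
	Iops:       aws.Int64(4000),
	Throughput: aws.Int64(250),
	Encrypted:  aws.Bool(true),
	RoleArn:    aws.String("arn:aws:iam::123456789012:role/ecsInfrastructureRole"), // placeholder
}
```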
The port value used if your service discovery service specified an SRV record. This field might be used if both the awsvpc
network mode and SRV records are used.
The port value to be used for your service discovery service. It's already specified in the task definition. If the task definition your service task specifies uses the bridge
or host
network mode, you must specify a containerName
and containerPort
combination from the task definition. If the task definition your service task specifies uses the awsvpc
network mode and a type SRV DNS record is used, you must specify either a containerName
and containerPort
combination or a port
value. However, you can't specify both.
The exit code that's returned for the state change request.
", + "TaskManagedEBSVolumeConfiguration$sizeInGiB": "The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the Size
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The following are the supported volume size values for each volume type.
gp2
and gp3
: 1-16,384
io1
and io2
: 4-16,384
st1
and sc1
: 125-16,384
standard
: 1-1,024
The number of I/O operations per second (IOPS). For gp3
, io1
, and io2
volumes, this represents the number of IOPS that are provisioned for the volume. For gp2
volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
The following are the supported values for each volume type.
gp3
: 3,000 - 16,000 IOPS
io1
: 100 - 64,000 IOPS
io2
: 100 - 256,000 IOPS
This parameter is required for io1
and io2
volume types. The default for gp3
volumes is 3,000 IOPS
. This parameter is not supported for st1
, sc1
, or standard
volume types.
This parameter maps 1:1 with the Iops
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the Throughput
parameter of the CreateVolume API in the Amazon EC2 API Reference.
This parameter is only supported for the gp3
volume type.
The number of instantiations of the task to place and keep running in your service.
", "UpdateServiceRequest$healthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid if your service is configured to use a load balancer. If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.
", "UpdateTaskProtectionRequest$expiresInMinutes": "If you set protectionEnabled
to true
, you can specify the duration for task protection in minutes. You can specify a value from 1 minute to up to 2,880 minutes (48 hours). During this time, your task will not be terminated by scale-in events from Service Auto Scaling or deployments. After this time period lapses, protectionEnabled
will be reset to false
.
If you don’t specify the time, then the task is automatically protected for 120 minutes (2 hours).
" @@ -878,6 +888,53 @@ "Scale$value": "The value, specified as a percent total of a service's desiredCount
, to scale the task set. Accepted values are numbers between 0 and 100.
The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId
parameter of the CreateVolume API in the Amazon EC2 API Reference.
Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId
parameter of the CreateVolume API in the Amazon EC2 API Reference.
Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
The type of volume resource.
" + } + }, + "EBSSnapshotId": { + "base": null, + "refs": { + "ServiceManagedEBSVolumeConfiguration$snapshotId": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The tag specifications of an Amazon EBS volume.
", + "refs": { + "EBSTagSpecifications$member": null + } + }, + "EBSTagSpecifications": { + "base": null, + "refs": { + "ServiceManagedEBSVolumeConfiguration$tagSpecifications": "The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the TagSpecifications.N
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the TagSpecifications.N
parameter of the CreateVolume API in the Amazon EC2 API Reference.
The volume type. This parameter maps 1:1 with the VolumeType
parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide.
The following are the supported volume types.
General Purpose SSD: gp2
|gp3
Provisioned IOPS SSD: io1
|io2
Throughput Optimized HDD: st1
Cold HDD: sc1
Magnetic: standard
The magnetic volume type is not supported on Fargate.
The volume type. This parameter maps 1:1 with the VolumeType
parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide.
The following are the supported volume types.
General Purpose SSD: gp2
|gp3
Provisioned IOPS SSD: io1
|io2
Throughput Optimized HDD: st1
Cold HDD: sc1
Magnetic: standard
The magnetic volume type is not supported on Fargate.
The name of the volume. This value must match the volume name from the Volume
object in the task definition.
The name of the volume. This value must match the volume name from the Volume
object in the task definition.
The authorization configuration details for the Amazon EFS file system.
", "refs": { @@ -1065,6 +1122,13 @@ "Volume$host": "This parameter is specified when you use bind mount host volumes. The contents of the host
parameter determine whether your bind mount host volume persists on the host container instance and where it's stored. If the host
parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.
Windows containers can mount whole directories on the same drive as $env:ProgramData
. Windows containers can't mount directories on a different drive, and mount point can't be across drives. For example, you can mount C:\\my\\path:C:\\my\\path
and D:\\:D:\\
, but not D:\\my\\path:C:\\my\\path
or D:\\:C:\\my\\path
.
The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes
IAM policy with this role. For more information, see Amazon ECS infrastructure IAM role in the Amazon ECS Developer Guide.
The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes
IAM policy with this role. For more information, see Amazon ECS infrastructure IAM role in the Amazon ECS Developer Guide.
Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on Amazon ECS in the Amazon Elastic Container Service Developer Guide.
", "refs": { @@ -1374,8 +1438,8 @@ "ManagedDraining": { "base": null, "refs": { - "AutoScalingGroupProvider$managedDraining": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.
The default is ENABLED
.
The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.
The default is ENABLED
.
The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.
", + "AutoScalingGroupProviderUpdate$managedDraining": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.
" } }, "ManagedScaling": { @@ -1596,6 +1660,7 @@ "base": null, "refs": { "CreateServiceRequest$propagateTags": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.
The default is NONE
.
Determines whether to propagate the tags from the task definition to
the Amazon EBS volume. Tags can only propagate to a SERVICE
specified in
ServiceVolumeConfiguration
. If no value is specified, the tags aren't
propagated.
Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action.
An error will be received if you specify the SERVICE
option when running a task.
Determines whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags aren't propagated.
", "StartTaskRequest$propagateTags": "Specifies whether to propagate the tags from the task definition or the service to the task. If no value is specified, the tags aren't propagated.
", @@ -1901,6 +1966,12 @@ "DescribeServicesRequest$include": "Determines whether you want to see the resource tags for the service. If TAGS
is specified, the tags are included in the response. If this field is omitted, tags aren't included in the response.
The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service.
Many of these parameters map 1:1 with the Amazon EBS CreateVolume
API request parameters.
The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created.
" + } + }, "ServiceNotActiveException": { "base": "The specified service isn't active. You can't update a service that's inactive. If you have previously deleted a service, you can re-create it with CreateService.
", "refs": { @@ -1927,6 +1998,20 @@ "ServiceRegistries$member": null } }, + "ServiceVolumeConfiguration": { + "base": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.
", + "refs": { + "ServiceVolumeConfigurations$member": null + } + }, + "ServiceVolumeConfigurations": { + "base": null, + "refs": { + "CreateServiceRequest$volumeConfigurations": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.
", + "Deployment$volumeConfigurations": "The details of the volume that was configuredAtLaunch
. You can configure different settings like the size, throughput, volumeType, and ecryption in ServiceManagedEBSVolumeConfiguration. The name
of the volume must match the name
from the task definition.
The details of the volume that was configuredAtLaunch
. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name
of the volume must match the name
from the task definition. If set to null, no new deployment is triggered. Otherwise, if this configuration differs from the existing one, it triggers a new deployment.
The unique identifier for the attachment.
", - "Attachment$type": "The type of the attachment, such as ElasticNetworkInterface
.
The type of the attachment, such as ElasticNetworkInterface
, Service Connect
, and AmazonElasticBlockStorage
.
The status of the attachment. Valid values are PRECREATED
, CREATED
, ATTACHING
, ATTACHED
, DETACHING
, DETACHED
, DELETED
, and FAILED
.
The Amazon Resource Name (ARN) of the attachment.
", "AttachmentStateChange$status": "The status of the attachment.
", @@ -2345,7 +2430,7 @@ "VersionInfo$agentVersion": "The version number of the Amazon ECS container agent.
", "VersionInfo$agentHash": "The Git commit hash for the Amazon ECS container agent build on the amazon-ecs-agent GitHub repository.
", "VersionInfo$dockerVersion": "The Docker version that's running on the container instance.
", - "Volume$name": "The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume
parameter of container definition mountPoints
.
This is required wwhen you use an Amazon EFS volume.
", + "Volume$name": "The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.
When using a volume configured at launch, the name
is required and must also be specified as the volume name in the ServiceVolumeConfiguration
or TaskVolumeConfiguration
parameter when creating your service or standalone task.
For all other types of volumes, this name is referenced in the sourceVolume
parameter of the mountPoints
object in the container definition.
When a volume is using the efsVolumeConfiguration
, the name is required.
The name of another container within the same task definition to mount volumes from.
" } }, @@ -2486,6 +2571,7 @@ "CreateServiceRequest$tags": "The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8
Maximum value length - 256 Unicode characters in UTF-8
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case-sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value. You define both. When a service is deleted, the tags are deleted.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8
Maximum value length - 256 Unicode characters in UTF-8
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case-sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
The metadata that's applied to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8
Maximum value length - 256 Unicode characters in UTF-8
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case-sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
The tags applied to this Amazon EBS volume. AmazonECSCreated
and AmazonECSManaged
are reserved tags that can't be used.
The tags for the resource.
", "RegisterContainerInstanceRequest$tags": "The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8
Maximum value length - 256 Unicode characters in UTF-8
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case-sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. You define both of them.
The following basic restrictions apply to tags:
Maximum number of tags per resource - 50
For each resource, each tag key must be unique, and each tag key can have only one value.
Maximum key length - 128 Unicode characters in UTF-8
Maximum value length - 256 Unicode characters in UTF-8
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.
Tag keys and values are case-sensitive.
Do not use aws:
, AWS:
, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.
Specifies whether you want to see the resource tags for the task. If TAGS
is specified, the tags are included in the response. If this field is omitted, tags aren't included in the response.
The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.
The available filesystem types are
ext3
, ext4
, and xfs
. If no value is specified, the xfs
filesystem type is used by default.
The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.
The available filesystem types are
ext3
, ext4
, and xfs
. If no value is specified, the xfs
filesystem type is used by default.
The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task.
", + "refs": { + "TaskVolumeConfiguration$managedEBSVolume": "The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created.
" + } + }, + "TaskManagedEBSVolumeTerminationPolicy": { + "base": "The termination policy for the Amazon EBS volume when the task exits. For more information, see Amazon ECS volume termination policy.
", + "refs": { + "TaskManagedEBSVolumeConfiguration$terminationPolicy": "The termination policy for the volume when the task exits. This provides a way to control whether Amazon ECS terminates the Amazon EBS volume when the task stops.
" + } + }, "TaskOverride": { "base": "The overrides that are associated with a task.
", "refs": { @@ -2641,6 +2746,19 @@ "Task$stopCode": "The stop code indicating why a task was stopped. The stoppedReason
might contain additional details.
For more information about stop code, see Stopped tasks error codes in the Amazon ECS User Guide.
The following are valid values:
TaskFailedToStart
EssentialContainerExited
UserInitiated
TerminationNotice
ServiceSchedulerInitiated
SpotInterruption
Configuration settings for the task volume that was configuredAtLaunch
that weren't set during RegisterTaskDef
.
The details of the volume that was configuredAtLaunch
. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in in TaskManagedEBSVolumeConfiguration. The name
of the volume must match the name
from the task definition.
The details of the volume that was configuredAtLaunch
. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name
of the volume must match the name
from the task definition.
A data volume that's used in a task definition. For tasks that use the Amazon Elastic File System (Amazon EFS), specify an efsVolumeConfiguration
. For Windows tasks that use Amazon FSx for Windows File Server file system, specify a fsxWindowsFileServerVolumeConfiguration
. For tasks that use a Docker volume, specify a DockerVolumeConfiguration
. For tasks that use a bind mount host volume, specify a host
and optional sourcePath
. For more information, see Using Data Volumes in Tasks.
The data volume configuration for tasks launched using this task definition. Specifying a volume configuration in a task definition is optional. The volume configuration may contain multiple volumes but only one volume configured at launch is supported. Each volume defined in the volume configuration may only specify a name
and one of either configuredAtLaunch
, dockerVolumeConfiguration
, efsVolumeConfiguration
, fsxWindowsFileServerVolumeConfiguration
, or host
. If an empty volume configuration is specified, by default Amazon ECS uses a host volume. For more information, see Using data volumes in tasks.
The state of the API destination that was updated.
" } }, + "AppSyncParameters": { + "base": "Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.
", + "refs": { + "Target$AppSyncParameters": "Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.
" + } + }, "Archive": { "base": "An Archive
object that contains details about an archive.
The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.
" } }, + "GraphQLOperation": { + "base": null, + "refs": { + "AppSyncParameters$GraphQLOperation": "The GraphQL operation; that is, the query, mutation, or subscription to be parsed and executed by the GraphQL service.
For more information, see Operations in the AppSync User Guide.
" + } + }, "HeaderKey": { "base": null, "refs": { @@ -1898,8 +1910,8 @@ "base": null, "refs": { "DescribeRuleResponse$State": "Specifies whether the rule is enabled or disabled.
", - "PutRuleRequest$State": "The state of the rule.
Valid values include:
DISABLED
: The rule is disabled. EventBridge does not match any events against the rule.
ENABLED
: The rule is enabled. EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail.
ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS
: The rule is enabled for all events, including Amazon Web Services management events delivered through CloudTrail.
Management events provide visibility into management operations that are performed on resources in your Amazon Web Services account. These are also known as control plane operations. For more information, see Logging management events in the CloudTrail User Guide, and Filtering management events from Amazon Web Services services in the Amazon EventBridge User Guide.
This value is only valid for rules on the default event bus or custom event buses. It does not apply to partner event buses.
The state of the rule.
Valid values include:
DISABLED
: The rule is disabled. EventBridge does not match any events against the rule.
ENABLED
: The rule is enabled. EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail.
ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS
: The rule is enabled for all events, including Amazon Web Services management events delivered through CloudTrail.
Management events provide visibility into management operations that are performed on resources in your Amazon Web Services account. These are also known as control plane operations. For more information, see Logging management events in the CloudTrail User Guide, and Filtering management events from Amazon Web Services services in the Amazon EventBridge User Guide.
This value is only valid for rules on the default event bus or custom event buses. It does not apply to partner event buses.
Indicates whether the rule is enabled or disabled.
", + "Rule$State": "The state of the rule.
" } }, "RunCommandParameters": { diff --git a/models/apis/iot/2015-05-28/api-2.json b/models/apis/iot/2015-05-28/api-2.json index 70ce2ad9020..26cf45d713a 100644 --- a/models/apis/iot/2015-05-28/api-2.json +++ b/models/apis/iot/2015-05-28/api-2.json @@ -3912,6 +3912,7 @@ "output":{"shape":"UpdatePackageResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -3929,6 +3930,7 @@ "output":{"shape":"UpdatePackageConfigurationResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], @@ -3945,6 +3947,7 @@ "output":{"shape":"UpdatePackageVersionResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} diff --git a/models/apis/iot/2015-05-28/docs-2.json b/models/apis/iot/2015-05-28/docs-2.json index b0445408853..90f403ec866 100644 --- a/models/apis/iot/2015-05-28/docs-2.json +++ b/models/apis/iot/2015-05-28/docs-2.json @@ -3075,10 +3075,10 @@ "DestinationPackageVersions": { "base": null, "refs": { - "CreateJobRequest$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", - "CreateJobTemplateRequest$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", - "DescribeJobTemplateResponse$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", - "Job$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
" + "CreateJobRequest$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", + "CreateJobTemplateRequest$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", + "DescribeJobTemplateResponse$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
", + "Job$destinationPackageVersions": "The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.
Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.
" } }, "DetachPolicyRequest": { diff --git a/models/apis/iotfleetwise/2021-06-17/api-2.json b/models/apis/iotfleetwise/2021-06-17/api-2.json index a355ccc9df7..3cd00c74c5b 100644 --- a/models/apis/iotfleetwise/2021-06-17/api-2.json +++ b/models/apis/iotfleetwise/2021-06-17/api-2.json @@ -2246,8 +2246,7 @@ "CAN_NETWORK_INTERFACE_INFO_IS_NULL", "OBD_NETWORK_INTERFACE_INFO_IS_NULL", "NETWORK_INTERFACE_TO_REMOVE_ASSOCIATED_WITH_SIGNALS", - "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL", - "CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL" + "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL" ] }, "NetworkInterfaceType":{ @@ -2255,8 +2254,7 @@ "enum":[ "CAN_INTERFACE", "OBD_INTERFACE", - "VEHICLE_MIDDLEWARE", - "CUSTOMER_DECODED_INTERFACE" + "VEHICLE_MIDDLEWARE" ] }, "NetworkInterfaces":{ @@ -2637,8 +2635,7 @@ "STRUCT_SIZE_MISMATCH", "NO_SIGNAL_IN_CATALOG_FOR_DECODER_SIGNAL", "SIGNAL_DECODER_INCOMPATIBLE_WITH_SIGNAL_CATALOG", - "EMPTY_MESSAGE_SIGNAL", - "CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL" + "EMPTY_MESSAGE_SIGNAL" ] }, "SignalDecoderType":{ @@ -2646,8 +2643,7 @@ "enum":[ "CAN_SIGNAL", "OBD_SIGNAL", - "MESSAGE_SIGNAL", - "CUSTOMER_DECODED_SIGNAL" + "MESSAGE_SIGNAL" ] }, "SignalDecoders":{ diff --git a/models/apis/secretsmanager/2017-10-17/docs-2.json b/models/apis/secretsmanager/2017-10-17/docs-2.json index a0d98654e79..8cdb1cfec8d 100644 --- a/models/apis/secretsmanager/2017-10-17/docs-2.json +++ b/models/apis/secretsmanager/2017-10-17/docs-2.json @@ -8,7 +8,7 @@ "DeleteResourcePolicy": "Deletes the resource-based permission policy attached to the secret. To attach a policy to a secret, use PutResourcePolicy.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:DeleteResourcePolicy
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Deletes a secret and all of its versions. You can specify a recovery window during which you can restore the secret. The minimum recovery window is 7 days. The default recovery window is 30 days. Secrets Manager attaches a DeletionDate
stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.
You can't delete a primary secret that is replicated to other Regions. You must first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you delete a replica, it is deleted immediately.
You can't directly delete a version of a secret. Instead, you remove all staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, and then Secrets Manager can automatically delete the version in the background.
To determine whether an application still uses a secret, you can create an Amazon CloudWatch alarm to alert you to any attempts to access a secret during the recovery window. For more information, see Monitor secrets scheduled for deletion.
Secrets Manager performs the permanent secret deletion at the end of the waiting period as a background task with low priority. There is no guarantee of a specific time after the recovery window for the permanent delete to occur.
At any time before the recovery window ends, you can use RestoreSecret to remove the DeletionDate
and cancel the deletion of the secret.
When a secret is scheduled for deletion, you cannot retrieve the secret value. You must first cancel the deletion with RestoreSecret and then you can retrieve the secret.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:DeleteSecret
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
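To make the recovery window concrete, a minimal aws-sdk-go (v1) sketch that schedules deletion with the 7-day minimum window and then cancels it before the window ends; the secret name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	// Schedule deletion with the minimum 7-day recovery window.
	del, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
		SecretId:             aws.String("MySecret"), // placeholder secret name
		RecoveryWindowInDays: aws.Int64(7),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deletion scheduled for:", aws.TimeValue(del.DeletionDate))

	// Before the window ends, RestoreSecret removes the DeletionDate stamp and cancels the deletion.
	if _, err := svc.RestoreSecret(&secretsmanager.RestoreSecretInput{
		SecretId: aws.String("MySecret"),
	}); err != nil {
		log.Fatal(err)
	}
}
```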
Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager only returns fields that have a value in the response.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:DescribeSecret
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:GetRandomPassword
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support. By default, Secrets Manager uses uppercase and lowercase letters, numbers, and the following characters in passwords: !\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:GetRandomPassword
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
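A short aws-sdk-go (v1) sketch of the password call described above; the length and excluded characters are arbitrary example values, not recommendations.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	out, err := svc.GetRandomPassword(&secretsmanager.GetRandomPasswordInput{
		PasswordLength:          aws.Int64(32),       // example length
		RequireEachIncludedType: aws.Bool(true),      // require at least one of each included type
		ExcludeCharacters:       aws.String(`"'@/\`), // example exclusions
	})
	if err != nil {
		log.Fatal(err)
	}
	// Avoid printing the password itself; just confirm its length.
	fmt.Println("generated password length:", len(aws.StringValue(out.RandomPassword)))
}
```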
Retrieves the JSON text of the resource-based policy document attached to the secret. For more information about permissions policies attached to a secret, see Permissions policies attached to a secret.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:GetResourcePolicy
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Retrieves the contents of the encrypted fields SecretString
or SecretBinary
from the specified version of a secret, whichever contains content.
To retrieve the values for a group of secrets, call BatchGetSecretValue.
We recommend that you cache your secret values by using client-side caching. Caching secrets improves speed and reduces your costs. For more information, see Cache secrets for your applications.
To retrieve the previous version of a secret, use VersionStage
and specify AWSPREVIOUS. To revert to the previous version of a secret, call UpdateSecretVersionStage.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:GetSecretValue
. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key aws/secretsmanager
, then you also need kms:Decrypt
permissions for that key. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
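A minimal aws-sdk-go (v1) sketch of reading a secret value and handling the SecretString/SecretBinary split described above; the secret name is a placeholder, and switching VersionStage to AWSPREVIOUS would read the prior version.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	out, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
		SecretId:     aws.String("MySecret"),   // placeholder secret name
		VersionStage: aws.String("AWSCURRENT"), // use "AWSPREVIOUS" for the prior version
	})
	if err != nil {
		log.Fatal(err)
	}
	// One of SecretString or SecretBinary is populated, depending on how the secret was stored.
	if out.SecretString != nil {
		fmt.Println("retrieved a string secret of length", len(aws.StringValue(out.SecretString)))
	} else {
		fmt.Println("retrieved a binary secret of", len(out.SecretBinary), "bytes")
	}
}
```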
Lists the versions of a secret. Secrets Manager uses staging labels to indicate the different versions of a secret. For more information, see Secrets Manager concepts: Versions.
To list the secrets in the account, use ListSecrets.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:ListSecretVersionIds
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Attaches a resource-based permission policy to a secret. A resource-based policy is optional. For more information, see Authentication and access control for Secrets Manager.
For information about attaching a policy in the console, see Attach a permissions policy to a secret.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:PutResourcePolicy
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
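As a concrete illustration of attaching a policy, a hedged aws-sdk-go (v1) sketch; the role ARN and secret name are placeholders, not recommended values.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	// Allow one role to read this secret's value; the principal ARN is a placeholder.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::123456789012:role/ExampleAppRole"},
	    "Action": "secretsmanager:GetSecretValue",
	    "Resource": "*"
	  }]
	}`

	if _, err := svc.PutResourcePolicy(&secretsmanager.PutResourcePolicyInput{
		SecretId:       aws.String("MySecret"), // placeholder secret name
		ResourcePolicy: aws.String(policy),
	}); err != nil {
		log.Fatal(err)
	}
}
```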
Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString
value or a new SecretBinary
value.
We recommend you avoid calling PutSecretValue
at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue
more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.
You can specify the staging labels to attach to the new version in VersionStages
. If you don't include VersionStages
, then Secrets Manager automatically moves the staging label AWSCURRENT
to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT
to it. If this operation moves the staging label AWSCURRENT
from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS
to the version that AWSCURRENT
was removed from.
This operation is idempotent. If you call this operation with a ClientRequestToken
that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary
or SecretString
because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:PutSecretValue
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
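A minimal aws-sdk-go (v1) sketch of the idempotent write described above; the secret name, JSON payload, and client request token are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	out, err := svc.PutSecretValue(&secretsmanager.PutSecretValueInput{
		SecretId:     aws.String("MySecret"), // placeholder secret name
		SecretString: aws.String(`{"username":"admin","password":"EXAMPLE-PASSWORD"}`),
		// Reusing the same token with the same payload makes retries idempotent.
		ClientRequestToken: aws.String("a1b2c3d4-5678-90ab-cdef-EXAMPLE11111"), // placeholder UUID
		// VersionStages is omitted, so Secrets Manager moves AWSCURRENT to the new version.
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new version:", aws.StringValue(out.VersionId))
}
```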
For a secret that is replicated to other Regions, deletes the secret replicas from the Regions you specify.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:RemoveRegionsFromReplication
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Replicates the secret to new Regions. See Multi-Region secrets.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:ReplicateSecretToRegions
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Replicates the secret to new Regions. See Multi-Region secrets.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:ReplicateSecretToRegions
. If the primary secret is encrypted with a KMS key other than aws/secretsmanager
, you also need kms:Decrypt
permission to the key. To encrypt the replicated secret with a KMS key other than aws/secretsmanager
, you need kms:GenerateDataKey
and kms:Encrypt
to the key. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
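A hedged aws-sdk-go (v1) sketch of the replication call; the Regions, KMS alias, and secret name are example values only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	out, err := svc.ReplicateSecretToRegions(&secretsmanager.ReplicateSecretToRegionsInput{
		SecretId: aws.String("MySecret"), // placeholder primary secret
		AddReplicaRegions: []*secretsmanager.ReplicaRegionType{
			{Region: aws.String("us-east-2")}, // encrypted with aws/secretsmanager by default
			{Region: aws.String("eu-west-1"), KmsKeyId: aws.String("alias/aws/secretsmanager")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range out.ReplicationStatus {
		fmt.Printf("%s: %s\n", aws.StringValue(s.Region), aws.StringValue(s.Status))
	}
}
```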
Cancels the scheduled deletion of a secret by removing the DeletedDate
time stamp. You can access a secret again after it has been restored.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:RestoreSecret
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
Configures and starts the asynchronous process of rotating the secret. For information about rotation, see Rotate secrets in the Secrets Manager User Guide. If you include the configuration parameters, the operation sets the values for the secret and then immediately starts a rotation. If you don't include the configuration parameters, the operation starts a rotation with the values already stored in the secret.
When rotation is successful, the AWSPENDING
staging label might be attached to the same version as the AWSCURRENT
version, or it might not be attached to any version. If the AWSPENDING
staging label is present but not attached to the same version as AWSCURRENT
, then any later invocation of RotateSecret
assumes that a previous rotation request is still in progress and returns an error. When rotation is unsuccessful, the AWSPENDING
staging label might be attached to an empty secret version. For more information, see Troubleshoot rotation in the Secrets Manager User Guide.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:RotateSecret
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. You also need lambda:InvokeFunction
permissions on the rotation function. For more information, see Permissions for rotation.
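A minimal aws-sdk-go (v1) sketch that sets a rotation function and schedule and starts a rotation; the Lambda ARN and secret name are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := secretsmanager.New(sess)

	out, err := svc.RotateSecret(&secretsmanager.RotateSecretInput{
		SecretId:          aws.String("MySecret"),                                                          // placeholder secret name
		RotationLambdaARN: aws.String("arn:aws:lambda:us-west-2:123456789012:function:MyRotationFunction"), // placeholder ARN
		RotationRules: &secretsmanager.RotationRulesType{
			AutomaticallyAfterDays: aws.Int64(30),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rotation started for version:", aws.StringValue(out.VersionId))
}
```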
Removes the link between the replica secret and the primary secret and promotes the replica to a primary secret in the replica Region.
You must call this operation from the Region in which you want to promote the replica to a primary secret.
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.
Required permissions: secretsmanager:StopReplicationToReplica
. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
The last date and time that Secrets Manager rotated the secret. If the secret isn't configured for rotation, Secrets Manager returns null.
", + "DescribeSecretResponse$LastRotatedDate": "The last date and time that Secrets Manager rotated the secret. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.
", "SecretListEntry$LastRotatedDate": "The most recent date and time that the Secrets Manager rotation process was successfully completed. This value is null if the secret hasn't ever rotated.
" } }, @@ -419,8 +419,8 @@ "NextRotationDateType": { "base": null, "refs": { - "DescribeSecretResponse$NextRotationDate": "The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.
", - "SecretListEntry$NextRotationDate": "The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.
" + "DescribeSecretResponse$NextRotationDate": "The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.
", + "SecretListEntry$NextRotationDate": "The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.
" } }, "NextTokenType": { @@ -647,7 +647,7 @@ "base": null, "refs": { "CreateSecretRequest$SecretBinary": "The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.
Either SecretString
or SecretBinary
must have a value, but not both.
This parameter is not available in the Secrets Manager console.
", - "GetSecretValueResponse$SecretBinary": "The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. The response parameter represents the binary data as a base64-encoded string.
If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString
instead.
The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary
using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.
If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString
instead.
The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter.
You must include SecretBinary
or SecretString
, but not both.
You can't access this value from the Secrets Manager console.
", "SecretValueEntry$SecretBinary": "The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. The parameter represents the binary data as a base64-encoded string.
", "UpdateSecretRequest$SecretBinary": "The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.
Either SecretBinary
or SecretString
must have a value, but not both.
You can't access this parameter in the Secrets Manager console.
" diff --git a/models/apis/secretsmanager/2017-10-17/examples-1.json b/models/apis/secretsmanager/2017-10-17/examples-1.json index d095b2df9c7..c99f65abe89 100644 --- a/models/apis/secretsmanager/2017-10-17/examples-1.json +++ b/models/apis/secretsmanager/2017-10-17/examples-1.json @@ -1,6 +1,63 @@ { "version": "1.0", "examples": { + "BatchGetSecretValue": [ + { + "input": { + "SecretIdList": [ + "MySecret1", + "MySecret2", + "MySecret3" + ] + }, + "output": { + "Errors": [ + + ], + "SecretValues": [ + { + "ARN": "®ion-arn;&asm-service-name;:us-west-2:&ExampleAccountId;:secret:MySecret1-a1b2c3", + "CreatedDate": 1700591229.801, + "Name": "MySecret1", + "SecretString": "{\"username\":\"diego_ramirez\",\"password\":\"EXAMPLE-PASSWORD\",\"engine\":\"mysql\",\"host\":\"secretsmanagertutorial.cluster.us-west-2.rds.amazonaws.com\",\"port\":3306,\"dbClusterIdentifier\":\"secretsmanagertutorial\"}", + "VersionId": "a1b2c3d4-5678-90ab-cdef-EXAMPLEaaaaa", + "VersionStages": [ + "AWSCURRENT" + ] + }, + { + "ARN": "®ion-arn;&asm-service-name;:us-west-2:&ExampleAccountId;:secret:MySecret2-a1b2c3", + "CreatedDate": 1699911394.105, + "Name": "MySecret2", + "SecretString": "{\"username\":\"akua_mansa\",\"password\":\"EXAMPLE-PASSWORD\"", + "VersionId": "a1b2c3d4-5678-90ab-cdef-EXAMPLEbbbbb", + "VersionStages": [ + "AWSCURRENT" + ] + }, + { + "ARN": "®ion-arn;&asm-service-name;:us-west-2:&ExampleAccountId;:secret:MySecret3-a1b2c3", + "CreatedDate": 1699911394.105, + "Name": "MySecret3", + "SecretString": "{\"username\":\"jie_liu\",\"password\":\"EXAMPLE-PASSWORD\"", + "VersionId": "a1b2c3d4-5678-90ab-cdef-EXAMPLEccccc", + "VersionStages": [ + "AWSCURRENT" + ] + } + ] + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "The following example gets the values for three secrets.", + "id": "to-retrieve-the-secret-values-for-a-group-of-secrets-listed-by-name-1704846593341", + "title": "To retrieve the secret values for a group of secrets listed by name" + } + ], "CancelRotateSecret": [ { "input": { diff --git a/models/apis/workspaces/2015-04-08/docs-2.json b/models/apis/workspaces/2015-04-08/docs-2.json index 67608b77588..8a90364a9a0 100644 --- a/models/apis/workspaces/2015-04-08/docs-2.json +++ b/models/apis/workspaces/2015-04-08/docs-2.json @@ -15,7 +15,7 @@ "CreateUpdatedWorkspaceImage": "Creates a new updated WorkSpace image based on the specified source image. The new updated WorkSpace image has the latest drivers and other updates required by the Amazon WorkSpaces components.
To determine which WorkSpace images need to be updated with the latest Amazon WorkSpaces requirements, use DescribeWorkspaceImages.
Only Windows 10, Windows Server 2016, and Windows Server 2019 WorkSpace images can be programmatically updated at this time.
Microsoft Windows updates and other application updates are not included in the update process.
The source WorkSpace image is not deleted. You can delete the source image after you've verified your new updated image and created a new bundle.
Creates the specified WorkSpace bundle. For more information about creating WorkSpace bundles, see Create a Custom WorkSpaces Image and Bundle.
", "CreateWorkspaceImage": "Creates a new WorkSpace image from an existing WorkSpace.
", - "CreateWorkspaces": "Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are created.
The MANUAL
running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
You don't need to specify the PCOIP
protocol for Linux bundles because WSP
is the default protocol for those bundles.
Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?
Creates one or more WorkSpaces.
This operation is asynchronous and returns before the WorkSpaces are created.
The MANUAL
running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
You don't need to specify the PCOIP
protocol for Linux bundles because WSP
is the default protocol for those bundles.
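For reference, a hedged aws-sdk-go (v1) sketch of the call; the directory, bundle, and user identifiers are placeholders, and AUTO_STOP is just an example running mode (MANUAL remains limited to Amazon WorkSpaces Core).

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := workspaces.New(sess)

	out, err := svc.CreateWorkspaces(&workspaces.CreateWorkspacesInput{
		Workspaces: []*workspaces.WorkspaceRequest{{
			DirectoryId: aws.String("d-9067xxxxxx"),  // placeholder directory ID
			UserName:    aws.String("jdoe"),          // placeholder user
			BundleId:    aws.String("wsb-xxxxxxxxx"), // placeholder bundle ID
			WorkspaceProperties: &workspaces.WorkspaceProperties{
				RunningMode:                         aws.String("AUTO_STOP"),
				RunningModeAutoStopTimeoutInMinutes: aws.Int64(60),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The call is asynchronous: inspect PendingRequests and FailedRequests rather than assuming success.
	fmt.Printf("pending: %d, failed: %d\n", len(out.PendingRequests), len(out.FailedRequests))
}
```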
Deletes customized client branding. Client branding allows you to customize your WorkSpace's client login portal. You can tailor your login portal company logo, the support email address, support link, link to reset password, and a custom message for users trying to sign in.
After you delete your customized client branding, your login portal reverts to the default client branding.
", "DeleteConnectClientAddIn": "Deletes a client-add-in for Amazon Connect that is configured within a directory.
", "DeleteConnectionAlias": "Deletes the specified connection alias. For more information, see Cross-Region Redirection for Amazon WorkSpaces.
If you will no longer be using a fully qualified domain name (FQDN) as the registration code for your WorkSpaces users, you must take certain precautions to prevent potential security issues. For more information, see Security Considerations if You Stop Using Cross-Region Redirection.
To delete a connection alias that has been shared, the shared account must first disassociate the connection alias from any directories it has been associated with. Then you must unshare the connection alias from the account it has been shared with. You can delete a connection alias only after it is no longer shared with any accounts or associated with any directories.
Modifies the default properties used to create WorkSpaces.
", "ModifyWorkspaceProperties": "Modifies the specified WorkSpace properties. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace.
The MANUAL
running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
Sets the state of the specified WorkSpace.
To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE
. WorkSpaces in this state do not respond to requests to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE
state.
Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is AVAILABLE
or UNHEALTHY
.
This operation is asynchronous and returns before the WorkSpaces have rebooted.
", + "RebootWorkspaces": "Reboots the specified WorkSpaces.
You cannot reboot a WorkSpace unless its state is AVAILABLE
, UNHEALTHY
, or REBOOTING
. Reboot a WorkSpace in the REBOOTING
state only if your WorkSpace has been stuck in the REBOOTING
state for over 20 minutes.
This operation is asynchronous and returns before the WorkSpaces have rebooted.
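A minimal aws-sdk-go (v1) sketch of a reboot request; the WorkSpace ID is a placeholder, and because the call is asynchronous, only FailedRequests is checked here.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := workspaces.New(sess)

	out, err := svc.RebootWorkspaces(&workspaces.RebootWorkspacesInput{
		RebootWorkspaceRequests: []*workspaces.RebootRequest{
			{WorkspaceId: aws.String("ws-xxxxxxxxx")}, // placeholder WorkSpace ID
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range out.FailedRequests {
		fmt.Printf("%s failed: %s\n", aws.StringValue(f.WorkspaceId), aws.StringValue(f.ErrorMessage))
	}
}
```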
", "RebuildWorkspaces": "Rebuilds the specified WorkSpace.
You cannot rebuild a WorkSpace unless its state is AVAILABLE
, ERROR
, UNHEALTHY
, STOPPED
, or REBOOTING
.
Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.
This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.
", "RegisterWorkspaceDirectory": "Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.
", "RestoreWorkspace": "Restores the specified WorkSpace to its last known healthy state.
You cannot restore a WorkSpace unless its state is AVAILABLE
, ERROR
, UNHEALTHY
, or STOPPED
.
Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.
This operation is asynchronous and returns before the WorkSpace is completely restored.
", @@ -1955,7 +1955,7 @@ "RunningMode": { "base": null, "refs": { - "WorkspaceProperties$RunningMode": "The running mode. For more information, see Manage the WorkSpace Running Mode.
The MANUAL
value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.
Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?
The running mode. For more information, see Manage the WorkSpace Running Mode.
The MANUAL
value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.