From 4bd06b93f8627928c17604383bd8639f2cb23739 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Fri, 17 Nov 2023 20:05:34 +0000 Subject: [PATCH] Merge customizations for service s3 --- .../331929f2c5c941d49d6160d2d2f34ee7.json | 9 + feature/s3/manager/download.go | 4 +- feature/s3/manager/download_test.go | 8 +- feature/s3/manager/integ_upload_test.go | 56 ++--- .../internal/integration/integration.go | 4 +- feature/s3/manager/upload.go | 16 +- feature/s3/manager/upload_test.go | 2 +- .../integrationtest/s3/checksum_test.go | 6 +- .../integrationtest/s3/presign_test.go | 2 +- service/s3/api_op_CompleteMultipartUpload.go | 2 +- service/s3/api_op_CopyObject.go | 4 +- service/s3/api_op_CreateBucket.go | 2 +- service/s3/api_op_CreateMultipartUpload.go | 4 +- service/s3/api_op_DeleteObject.go | 4 +- service/s3/api_op_DeleteObjects.go | 2 +- service/s3/api_op_GetObject.go | 14 +- service/s3/api_op_GetObjectAttributes.go | 6 +- service/s3/api_op_HeadObject.go | 12 +- ...pi_op_ListBucketAnalyticsConfigurations.go | 2 +- ...tBucketIntelligentTieringConfigurations.go | 2 +- ...pi_op_ListBucketInventoryConfigurations.go | 2 +- .../api_op_ListBucketMetricsConfigurations.go | 2 +- service/s3/api_op_ListMultipartUploads.go | 6 +- service/s3/api_op_ListObjectVersions.go | 6 +- service/s3/api_op_ListObjects.go | 6 +- service/s3/api_op_ListObjectsV2.go | 22 +- service/s3/api_op_ListParts.go | 18 +- ...i_op_PutBucketNotificationConfiguration.go | 2 +- service/s3/api_op_PutBucketPolicy.go | 2 +- service/s3/api_op_PutObject.go | 6 +- service/s3/api_op_PutObjectRetention.go | 2 +- service/s3/api_op_PutPublicAccessBlock.go | 2 +- service/s3/api_op_UploadPart.go | 6 +- service/s3/api_op_UploadPartCopy.go | 4 +- service/s3/api_op_WriteGetObjectResponse.go | 14 +- service/s3/deserializers.go | 154 ++++++------ service/s3/handwritten_paginators.go | 18 +- service/s3/handwritten_paginators_test.go | 71 +++--- .../internal/customizations/presign_test.go | 12 +- .../write_get_object_response_test.go | 9 +- service/s3/serializers.go | 232 +++++++++--------- service/s3/types/types.go | 98 ++++---- service/s3/types/types_exported_test.go | 4 +- service/s3/validators.go | 12 + 44 files changed, 453 insertions(+), 418 deletions(-) create mode 100644 .changelog/331929f2c5c941d49d6160d2d2f34ee7.json diff --git a/.changelog/331929f2c5c941d49d6160d2d2f34ee7.json b/.changelog/331929f2c5c941d49d6160d2d2f34ee7.json new file mode 100644 index 00000000000..59c3071b3e2 --- /dev/null +++ b/.changelog/331929f2c5c941d49d6160d2d2f34ee7.json @@ -0,0 +1,9 @@ +{ + "id": "331929f2-c5c9-41d4-9d61-60d2d2f34ee7", + "type": "feature", + "description": "**BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.", + "modules": [ + "feature/s3/manager", + "service/s3" + ] +} diff --git a/feature/s3/manager/download.go b/feature/s3/manager/download.go index 2ebcea585e3..06070adadde 100644 --- a/feature/s3/manager/download.go +++ b/feature/s3/manager/download.go @@ -436,8 +436,8 @@ func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { if resp.ContentRange == nil { // ContentRange is nil when the full file contents is provided, and // is not chunked. Use ContentLength instead. 
- if resp.ContentLength > 0 { - d.totalBytes = resp.ContentLength + if aws.ToInt64(resp.ContentLength) > 0 { + d.totalBytes = aws.ToInt64(resp.ContentLength) return } } else { diff --git a/feature/s3/manager/download_test.go b/feature/s3/manager/download_test.go index adb8d5405f3..842c735fc1a 100644 --- a/feature/s3/manager/download_test.go +++ b/feature/s3/manager/download_test.go @@ -70,7 +70,7 @@ func newDownloadRangeClient(data []byte) (*downloadCaptureClient, *int, *[]strin return &s3.GetObjectOutput{ Body: ioutil.NopCloser(bytes.NewReader(bodyBytes)), ContentRange: aws.String(fmt.Sprintf("bytes %d-%d/%d", start, fin-1, len(data))), - ContentLength: int64(len(bodyBytes)), + ContentLength: aws.Int64(int64(len(bodyBytes))), }, nil } @@ -83,7 +83,7 @@ func newDownloadNonRangeClient(data []byte) (*downloadCaptureClient, *int) { capture.GetObjectFn = func(_ context.Context, params *s3.GetObjectInput, _ ...func(*s3.Options)) (*s3.GetObjectOutput, error) { return &s3.GetObjectOutput{ Body: ioutil.NopCloser(bytes.NewReader(data[:])), - ContentLength: int64(len(data)), + ContentLength: aws.Int64(int64(len(data))), }, nil } @@ -139,7 +139,7 @@ func newDownloadWithErrReaderClient(cases []testErrReader) (*downloadCaptureClie out := &s3.GetObjectOutput{ Body: ioutil.NopCloser(&c), ContentRange: aws.String(fmt.Sprintf("bytes %d-%d/%d", 0, c.Len-1, c.Len)), - ContentLength: c.Len, + ContentLength: aws.Int64(c.Len), } index++ return out, nil @@ -542,7 +542,7 @@ func TestDownload_WithFailure(t *testing.T) { body := bytes.NewReader(make([]byte, manager.DefaultDownloadPartSize)) out = &s3.GetObjectOutput{ Body: ioutil.NopCloser(body), - ContentLength: int64(body.Len()), + ContentLength: aws.Int64(int64(body.Len())), ContentRange: aws.String(fmt.Sprintf("bytes %d-%d/%d", startingByte, body.Len()-1, body.Len()*10)), } diff --git a/feature/s3/manager/integ_upload_test.go b/feature/s3/manager/integ_upload_test.go index 2555cf5ed54..dffaa3bf53e 100644 --- a/feature/s3/manager/integ_upload_test.go +++ b/feature/s3/manager/integ_upload_test.go @@ -228,15 +228,15 @@ func TestInteg_UploadPresetChecksum(t *testing.T) { expectParts: []s3types.CompletedPart{ { ETag: aws.String(singlePartETag), - PartNumber: 1, + PartNumber: aws.Int32(1), }, { ETag: aws.String(singlePartETag), - PartNumber: 2, + PartNumber: aws.Int32(2), }, { ETag: aws.String(multiPartTailETag), - PartNumber: 3, + PartNumber: aws.Int32(3), }, }, expectETag: multiPartETag, @@ -248,17 +248,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) { { ChecksumCRC32: aws.String(singlePartCRC32), ETag: aws.String(singlePartETag), - PartNumber: 1, + PartNumber: aws.Int32(1), }, { ChecksumCRC32: aws.String(singlePartCRC32), ETag: aws.String(singlePartETag), - PartNumber: 2, + PartNumber: aws.Int32(2), }, { ChecksumCRC32: aws.String(multiPartTailCRC32), ETag: aws.String(multiPartTailETag), - PartNumber: 3, + PartNumber: aws.Int32(3), }, }, expectChecksumCRC32: multiPartCRC32, @@ -271,17 +271,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) { { ChecksumCRC32C: aws.String(singlePartCRC32C), ETag: aws.String(singlePartETag), - PartNumber: 1, + PartNumber: aws.Int32(1), }, { ChecksumCRC32C: aws.String(singlePartCRC32C), ETag: aws.String(singlePartETag), - PartNumber: 2, + PartNumber: aws.Int32(2), }, { ChecksumCRC32C: aws.String(multiPartTailCRC32C), ETag: aws.String(multiPartTailETag), - PartNumber: 3, + PartNumber: aws.Int32(3), }, }, expectChecksumCRC32C: multiPartCRC32C, @@ -294,17 +294,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) { { 
 				ChecksumSHA1: aws.String(singlePartSHA1),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumSHA1: aws.String(singlePartSHA1),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumSHA1: aws.String(multiPartTailSHA1),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumSHA1: multiPartSHA1,
@@ -317,17 +317,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) {
 			{
 				ChecksumSHA256: aws.String(singlePartSHA256),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumSHA256: aws.String(singlePartSHA256),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumSHA256: aws.String(multiPartTailSHA256),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumSHA256: multiPartSHA256,
@@ -343,17 +343,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) {
 			{
 				ChecksumCRC32: aws.String(singlePartCRC32),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumCRC32: aws.String(singlePartCRC32),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumCRC32: aws.String(multiPartTailCRC32),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumCRC32: multiPartCRC32,
@@ -367,17 +367,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) {
 			{
 				ChecksumCRC32C: aws.String(singlePartCRC32C),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumCRC32C: aws.String(singlePartCRC32C),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumCRC32C: aws.String(multiPartTailCRC32C),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumCRC32C: multiPartCRC32C,
@@ -391,17 +391,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) {
 			{
 				ChecksumSHA1: aws.String(singlePartSHA1),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumSHA1: aws.String(singlePartSHA1),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumSHA1: aws.String(multiPartTailSHA1),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumSHA1: multiPartSHA1,
@@ -415,17 +415,17 @@ func TestInteg_UploadPresetChecksum(t *testing.T) {
 			{
 				ChecksumSHA256: aws.String(singlePartSHA256),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 1,
+				PartNumber: aws.Int32(1),
 			},
 			{
 				ChecksumSHA256: aws.String(singlePartSHA256),
 				ETag: aws.String(singlePartETag),
-				PartNumber: 2,
+				PartNumber: aws.Int32(2),
 			},
 			{
 				ChecksumSHA256: aws.String(multiPartTailSHA256),
 				ETag: aws.String(multiPartTailETag),
-				PartNumber: 3,
+				PartNumber: aws.Int32(3),
 			},
 		},
 		expectChecksumSHA256: multiPartSHA256,
@@ -499,7 +499,7 @@ func (b *invalidateHash) RegisterMiddleware(stack *middleware.Stack) error {
 func (b *invalidateHash) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
-	if input, ok := in.Parameters.(*s3.UploadPartInput); ok && input.PartNumber == 2 {
+	if input, ok := in.Parameters.(*s3.UploadPartInput); ok && aws.ToInt32(input.PartNumber) == 2 {
 		ctx = v4.SetPayloadHash(ctx, "000")
 	}
 
diff --git a/feature/s3/manager/internal/integration/integration.go
b/feature/s3/manager/internal/integration/integration.go index 19fd10bac1e..90351488996 100644 --- a/feature/s3/manager/internal/integration/integration.go +++ b/feature/s3/manager/internal/integration/integration.go @@ -156,7 +156,7 @@ func CleanupBucket(client *s3.Client, bucketName string) error { errs = append(errs, fmt.Errorf("failed to delete %s, %s", aws.ToString(deleteError.Key), aws.ToString(deleteError.Message))) } - if listObjectsV2.IsTruncated { + if aws.ToBool(listObjectsV2.IsTruncated) { input.ContinuationToken = listObjectsV2.NextContinuationToken } else { break @@ -182,7 +182,7 @@ func CleanupBucket(client *s3.Client, bucketName string) error { }) } - if uploads.IsTruncated { + if aws.ToBool(uploads.IsTruncated) { input.KeyMarker = uploads.NextKeyMarker input.UploadIdMarker = uploads.NextUploadIdMarker } else { diff --git a/feature/s3/manager/upload.go b/feature/s3/manager/upload.go index d68246c2b86..598cedbac26 100644 --- a/feature/s3/manager/upload.go +++ b/feature/s3/manager/upload.go @@ -501,7 +501,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e return &UploadOutput{ Location: locationRecorder.location, - BucketKeyEnabled: out.BucketKeyEnabled, + BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled), ChecksumCRC32: out.ChecksumCRC32, ChecksumCRC32C: out.ChecksumCRC32C, ChecksumSHA1: out.ChecksumSHA1, @@ -568,9 +568,11 @@ type chunk struct { // since S3 required this list to be sent in sorted order. type completedParts []types.CompletedPart -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { + return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber) +} // upload will perform a multipart upload using the firstBuf buffer containing // the first chunk of data. @@ -639,7 +641,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO UploadID: u.uploadID, CompletedParts: u.parts, - BucketKeyEnabled: completeOut.BucketKeyEnabled, + BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled), ChecksumCRC32: completeOut.ChecksumCRC32, ChecksumCRC32C: completeOut.ChecksumCRC32C, ChecksumSHA1: completeOut.ChecksumSHA1, @@ -722,7 +724,7 @@ func (u *multiuploader) send(c chunk) error { // PutObject as they are never valid for individual parts of a // multipart upload. - PartNumber: c.num, + PartNumber: aws.Int32(c.num), UploadId: &u.uploadID, } // TODO should do copy then clear? 
@@ -734,7 +736,7 @@ func (u *multiuploader) send(c chunk) error { var completed types.CompletedPart awsutil.Copy(&completed, resp) - completed.PartNumber = c.num + completed.PartNumber = aws.Int32(c.num) u.m.Lock() u.parts = append(u.parts, completed) diff --git a/feature/s3/manager/upload_test.go b/feature/s3/manager/upload_test.go index ca149bbd884..d33d8a34751 100644 --- a/feature/s3/manager/upload_test.go +++ b/feature/s3/manager/upload_test.go @@ -89,7 +89,7 @@ func TestUploadOrderMulti(t *testing.T) { num := parts[i].PartNumber etag := aws.ToString(parts[i].ETag) - if int32(i+1) != num { + if int32(i+1) != aws.ToInt32(num) { t.Errorf("expect %d, got %d", i+1, num) } diff --git a/service/internal/integrationtest/s3/checksum_test.go b/service/internal/integrationtest/s3/checksum_test.go index 1a5f7f3a4f6..9da73ac8d78 100644 --- a/service/internal/integrationtest/s3/checksum_test.go +++ b/service/internal/integrationtest/s3/checksum_test.go @@ -104,7 +104,7 @@ func TestInteg_ObjectChecksums(t *testing.T) { "content length preset": { params: &s3.PutObjectInput{ Body: strings.NewReader("hello world"), - ContentLength: 11, + ContentLength: aws.Int64(11), ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32c, }, getObjectChecksumMode: s3types.ChecksumModeEnabled, @@ -198,7 +198,7 @@ func TestInteg_ObjectChecksums(t *testing.T) { "content length preset": { params: &s3.PutObjectInput{ Body: ioutil.NopCloser(strings.NewReader("hello world")), - ContentLength: 11, + ContentLength: aws.Int64(11), ChecksumAlgorithm: s3types.ChecksumAlgorithmCrc32c, }, getObjectChecksumMode: s3types.ChecksumModeEnabled, @@ -449,7 +449,7 @@ func TestInteg_RequireChecksum(t *testing.T) { Objects: []s3types.ObjectIdentifier{ {Key: aws.String(t.Name())}, }, - Quiet: true, + Quiet: aws.Bool(true), }, ChecksumAlgorithm: c.checksumAlgorithm, }) diff --git a/service/internal/integrationtest/s3/presign_test.go b/service/internal/integrationtest/s3/presign_test.go index e74ca40147d..707a804f641 100644 --- a/service/internal/integrationtest/s3/presign_test.go +++ b/service/internal/integrationtest/s3/presign_test.go @@ -192,7 +192,7 @@ func TestInteg_MultipartPresignURL(t *testing.T) { uploadPartInput := &s3.UploadPartInput{ Bucket: &setupMetadata.Buckets.Source.Name, Key: &key, - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: multipartUpload.UploadId, Body: c.body, } diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index 88901f70710..6944c3ddae0 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -207,7 +207,7 @@ type CompleteMultipartUploadOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be // present if it was uploaded with the object. With multipart uploads, this may not diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index bd1b4cda883..3e9652cd0c8 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -234,7 +234,7 @@ type CopyObjectInput struct { // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t // affect bucket-level settings for S3 Bucket Key. 
- BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -407,7 +407,7 @@ type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index c620fce9ede..5fc78859857 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -122,7 +122,7 @@ type CreateBucketInput struct { GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - ObjectLockEnabledForBucket bool + ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 8e21441b1bf..190b32dd3ff 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -224,7 +224,7 @@ type CreateMultipartUploadInput struct { // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with an object action doesn’t // affect bucket-level settings for S3 Bucket Key. - BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -388,7 +388,7 @@ type CreateMultipartUploadOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index 983a51beced..0c78bbb77dc 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -76,7 +76,7 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the // s3:BypassGovernanceRetention permission. - BypassGovernanceRetention bool + BypassGovernanceRetention *bool // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request fails with the HTTP status code 403 Forbidden @@ -115,7 +115,7 @@ type DeleteObjectOutput struct { // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the // object is a delete marker. - DeleteMarker bool + DeleteMarker *bool // If present, indicates that the requester was successfully charged for the // request. diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index 7676e841590..46cc2994d56 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -88,7 +88,7 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the // s3:BypassGovernanceRetention permission. 
- BypassGovernanceRetention bool + BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when using // the SDK. This header will not provide any additional functionality if not using diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index 144815e15d7..1db1f0bd2a7 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -177,7 +177,7 @@ type GetObjectInput struct { // Part number of the object being read. This is a positive integer between 1 and // 10,000. Effectively performs a 'ranged' GET request for the part specified. // Useful for downloading just a part of an object. - PartNumber int32 + PartNumber *int32 // Downloads the specified range bytes of an object. For more information about // the HTTP Range header, see @@ -249,7 +249,7 @@ type GetObjectOutput struct { // Indicates whether the object uses an S3 Bucket Key for server-side encryption // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -294,7 +294,7 @@ type GetObjectOutput struct { ContentLanguage *string // Size of the body in bytes. - ContentLength int64 + ContentLength *int64 // The portion of the object returned in the response. ContentRange *string @@ -304,7 +304,7 @@ type GetObjectOutput struct { // Specifies whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. - DeleteMarker bool + DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. @@ -331,7 +331,7 @@ type GetObjectOutput struct { // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you // can create metadata whose values are not legal HTTP headers. - MissingMeta int32 + MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only // returned if you have permission to view an object's legal hold status. @@ -345,7 +345,7 @@ type GetObjectOutput struct { // The count of parts this object has. This value is only returned if you specify // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount int32 + PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a // source or destination in a replication rule. @@ -382,7 +382,7 @@ type GetObjectOutput struct { StorageClass types.StorageClass // The number of tags, if any, on the object. - TagCount int32 + TagCount *int32 // Version of the object. VersionId *string diff --git a/service/s3/api_op_GetObjectAttributes.go b/service/s3/api_op_GetObjectAttributes.go index 65f9ad4ebad..d8a4555f61f 100644 --- a/service/s3/api_op_GetObjectAttributes.go +++ b/service/s3/api_op_GetObjectAttributes.go @@ -127,7 +127,7 @@ type GetObjectAttributesInput struct { ExpectedBucketOwner *string // Sets the maximum number of parts to return. - MaxParts int32 + MaxParts *int32 // Specifies the part after which listing should begin. Only parts with higher // part numbers will be listed. @@ -175,7 +175,7 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. 
- DeleteMarker bool + DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version // of a resource found at a URL. @@ -188,7 +188,7 @@ type GetObjectAttributesOutput struct { ObjectParts *types.GetObjectAttributesParts // The size of the object in bytes. - ObjectSize int64 + ObjectSize *int64 // If present, indicates that the requester was successfully charged for the // request. diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index 4277d70ddde..18597b10f4c 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -143,7 +143,7 @@ type HeadObjectInput struct { // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. // Useful querying about the size of the part and the number of parts in this // object. - PartNumber int32 + PartNumber *int32 // HeadObject returns only the metadata for an object. If the Range is // satisfiable, only the ContentLength is affected in the response. If the Range @@ -196,7 +196,7 @@ type HeadObjectOutput struct { // Indicates whether the object uses an S3 Bucket Key for server-side encryption // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -241,14 +241,14 @@ type HeadObjectOutput struct { ContentLanguage *string // Size of the body in bytes. - ContentLength int64 + ContentLength *int64 // A standard MIME type describing the format of the object data. ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. - DeleteMarker bool + DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. @@ -275,7 +275,7 @@ type HeadObjectOutput struct { // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you // can create metadata whose values are not legal HTTP headers. - MissingMeta int32 + MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This @@ -296,7 +296,7 @@ type HeadObjectOutput struct { // The count of parts this object has. This value is only returned if you specify // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount int32 + PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is // either a source or a destination in a replication rule. In replication, you have diff --git a/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 7e3f98e1681..af62aa9bab8 100644 --- a/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -84,7 +84,7 @@ type ListBucketAnalyticsConfigurationsOutput struct { // Indicates whether the returned list of analytics configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // NextContinuationToken is sent when isTruncated is true, which indicates that // there are more analytics configurations to list. 
The next request must include diff --git a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index 29217e35f00..1dd4ca5ac8d 100644 --- a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -78,7 +78,7 @@ type ListBucketIntelligentTieringConfigurationsOutput struct { // Indicates whether the returned list of analytics configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // The marker used to continue this inventory configuration listing. Use the // NextContinuationToken from this response to continue the listing in a subsequent diff --git a/service/s3/api_op_ListBucketInventoryConfigurations.go b/service/s3/api_op_ListBucketInventoryConfigurations.go index 62a68611976..0000dccd225 100644 --- a/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -85,7 +85,7 @@ type ListBucketInventoryConfigurationsOutput struct { // Tells whether the returned list of inventory configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken is provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // The marker used to continue this inventory configuration listing. Use the // NextContinuationToken from this response to continue the listing in a subsequent diff --git a/service/s3/api_op_ListBucketMetricsConfigurations.go b/service/s3/api_op_ListBucketMetricsConfigurations.go index b77570739ad..597e06824c6 100644 --- a/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -84,7 +84,7 @@ type ListBucketMetricsConfigurationsOutput struct { // Indicates whether the returned list of metrics configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // The list of metrics configurations for a bucket. MetricsConfigurationList []types.MetricsConfiguration diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index 6c908969d43..89fa91dd45b 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -105,7 +105,7 @@ type ListMultipartUploadsInput struct { // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the // response body. 1,000 is the maximum number of uploads that can be returned in a // response. - MaxUploads int32 + MaxUploads *int32 // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of @@ -162,14 +162,14 @@ type ListMultipartUploadsOutput struct { // of true indicates that the list was truncated. The list can be truncated if the // number of multipart uploads exceeds the limit allowed or specified by max // uploads. - IsTruncated bool + IsTruncated *bool // The key at or after which the listing began. KeyMarker *string // Maximum number of multipart uploads that could have been included in the // response. 
- MaxUploads int32 + MaxUploads *int32 // When a list is truncated, this element specifies the value that should be used // for the key-marker request parameter in a subsequent request. diff --git a/service/s3/api_op_ListObjectVersions.go b/service/s3/api_op_ListObjectVersions.go index c968c61fe2d..60d5dd6461b 100644 --- a/service/s3/api_op_ListObjectVersions.go +++ b/service/s3/api_op_ListObjectVersions.go @@ -76,7 +76,7 @@ type ListObjectVersionsInput struct { // will never contain more. If additional keys satisfy the search criteria, but // were not returned because max-keys was exceeded, the response contains true . To // return the additional keys, see key-marker and version-id-marker . - MaxKeys int32 + MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields // that you do not specify are not returned. @@ -136,13 +136,13 @@ type ListObjectVersionsOutput struct { // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the rest of // the results. - IsTruncated bool + IsTruncated *bool // Marks the last key returned in a truncated response. KeyMarker *string // Specifies the maximum number of objects to return. - MaxKeys int32 + MaxKeys *int32 // The bucket name. Name *string diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 13184b39975..81e080deb75 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -85,7 +85,7 @@ type ListObjectsInput struct { // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys but // will never contain more. - MaxKeys int32 + MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields // that you do not specify are not returned. @@ -135,14 +135,14 @@ type ListObjectsOutput struct { // A flag that indicates whether Amazon S3 returned all of the results that // satisfied the search criteria. - IsTruncated bool + IsTruncated *bool // Indicates where in the bucket listing begins. Marker is included in the // response if it was sent with the request. Marker *string // The maximum number of keys returned in the response body. - MaxKeys int32 + MaxKeys *int32 // The bucket name. Name *string diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index fea614ffb33..93eb4e6f46c 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -89,12 +89,12 @@ type ListObjectsV2Input struct { // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner // field to true . - FetchOwner bool + FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys but // will never contain more. - MaxKeys int32 + MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields // that you do not specify are not returned. @@ -155,17 +155,17 @@ type ListObjectsV2Output struct { // Set to false if all of the results were returned. Set to true if more keys are // available to return. If the number of results exceeds that specified by MaxKeys // , all of the results might not be returned. 
- IsTruncated bool + IsTruncated *bool // KeyCount is the number of keys returned with this request. KeyCount will always // be less than or equal to the MaxKeys field. For example, if you ask for 50 // keys, your result will include 50 keys or fewer. - KeyCount int32 + KeyCount *int32 // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys but // will never contain more. - MaxKeys int32 + MaxKeys *int32 // The bucket name. When using this action with an access point, you must direct // requests to the access point hostname. The access point hostname takes the form @@ -338,8 +338,8 @@ func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObject } options := ListObjectsV2PaginatorOptions{} - if params.MaxKeys != 0 { - options.Limit = params.MaxKeys + if params.MaxKeys != nil { + options.Limit = *params.MaxKeys } for _, fn := range optFns { @@ -369,7 +369,11 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O params := *p.params params.ContinuationToken = p.nextToken - params.MaxKeys = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxKeys = limit result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) if err != nil { @@ -379,7 +383,7 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O prevToken := p.nextToken p.nextToken = nil - if result.IsTruncated { + if result.IsTruncated != nil && *result.IsTruncated { p.nextToken = result.NextContinuationToken } diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go index 0badfaaa978..2a0c55aac62 100644 --- a/service/s3/api_op_ListParts.go +++ b/service/s3/api_op_ListParts.go @@ -88,7 +88,7 @@ type ListPartsInput struct { ExpectedBucketOwner *string // Sets the maximum number of parts to return. - MaxParts int32 + MaxParts *int32 // Specifies the part after which listing should begin. Only parts with higher // part numbers will be listed. @@ -162,13 +162,13 @@ type ListPartsOutput struct { // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. - IsTruncated bool + IsTruncated *bool // Object key for which the multipart upload was initiated. Key *string // Maximum number of parts that were allowed in the response. - MaxParts int32 + MaxParts *int32 // When a list is truncated, this element specifies the last part in the list, as // well as the value to use for the part-number-marker request parameter in a @@ -337,8 +337,8 @@ func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, op } options := ListPartsPaginatorOptions{} - if params.MaxParts != 0 { - options.Limit = params.MaxParts + if params.MaxParts != nil { + options.Limit = *params.MaxParts } for _, fn := range optFns { @@ -368,7 +368,11 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio params := *p.params params.PartNumberMarker = p.nextToken - params.MaxParts = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxParts = limit result, err := p.client.ListParts(ctx, ¶ms, optFns...) 
if err != nil { @@ -378,7 +382,7 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio prevToken := p.nextToken p.nextToken = nil - if result.IsTruncated { + if result.IsTruncated != nil && *result.IsTruncated { p.nextToken = result.NextPartNumberMarker } diff --git a/service/s3/api_op_PutBucketNotificationConfiguration.go b/service/s3/api_op_PutBucketNotificationConfiguration.go index 7f1c8c33177..6fc3112402f 100644 --- a/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -82,7 +82,7 @@ type PutBucketNotificationConfigurationInput struct { // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or // false value. - SkipDestinationValidation bool + SkipDestinationValidation *bool noSmithyDocumentSerde } diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index 1e7e5b62739..b2050c85485 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -71,7 +71,7 @@ type PutBucketPolicyInput struct { // Set this parameter to true to confirm that you want to remove your permissions // to change this bucket policy in the future. - ConfirmRemoveSelfBucketAccess bool + ConfirmRemoveSelfBucketAccess *bool // The MD5 hash of the request body. For requests made using the Amazon Web // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index 9175ef2d17c..03d01b3bfbd 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -137,7 +137,7 @@ type PutObjectInput struct { // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect // bucket-level settings for S3 Bucket Key. - BucketKeyEnabled bool + BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) @@ -201,7 +201,7 @@ type PutObjectInput struct { // cannot be determined automatically. For more information, see // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) // . - ContentLength int64 + ContentLength *int64 // The base64-encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to @@ -343,7 +343,7 @@ type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be // present if it was uploaded with the object. With multipart uploads, this may not diff --git a/service/s3/api_op_PutObjectRetention.go b/service/s3/api_op_PutObjectRetention.go index 3663b78b6e2..d2fd1920023 100644 --- a/service/s3/api_op_PutObjectRetention.go +++ b/service/s3/api_op_PutObjectRetention.go @@ -56,7 +56,7 @@ type PutObjectRetentionInput struct { Key *string // Indicates whether this action should bypass Governance-mode restrictions. 
- BypassGovernanceRetention bool + BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when using // the SDK. This header will not provide any additional functionality if not using diff --git a/service/s3/api_op_PutPublicAccessBlock.go b/service/s3/api_op_PutPublicAccessBlock.go index 4c2f0c54655..206e9d07228 100644 --- a/service/s3/api_op_PutPublicAccessBlock.go +++ b/service/s3/api_op_PutPublicAccessBlock.go @@ -22,7 +22,7 @@ import ( // an object, it checks the PublicAccessBlock configuration for both the bucket // (or the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the -// account, S3 uses the most restrictive combination of the bucket-level and +// account, Amazon S3 uses the most restrictive combination of the bucket-level and // account-level settings. For more information about when Amazon S3 considers a // bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) // . The following operations are related to PutPublicAccessBlock : diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 7e6e231f03c..21c753a3405 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -132,7 +132,7 @@ type UploadPartInput struct { // 10,000. // // This member is required. - PartNumber int32 + PartNumber *int32 // Upload ID identifying the multipart upload whose part is being uploaded. // @@ -184,7 +184,7 @@ type UploadPartInput struct { // Size of the body in bytes. This parameter is useful when the size of the body // cannot be determined automatically. - ContentLength int64 + ContentLength *int64 // The base64-encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required @@ -234,7 +234,7 @@ type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be // present if it was uploaded with the object. With multipart uploads, this may not diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index 4ca31052f51..195e83fb454 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -159,7 +159,7 @@ type UploadPartCopyInput struct { // 10,000. // // This member is required. - PartNumber int32 + PartNumber *int32 // Upload ID identifying the multipart upload whose part is being copied. // @@ -247,7 +247,7 @@ type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Container for all response elements. 
CopyPartResult *types.CopyPartResult diff --git a/service/s3/api_op_WriteGetObjectResponse.go b/service/s3/api_op_WriteGetObjectResponse.go index 3246145db08..2b9560c91b7 100644 --- a/service/s3/api_op_WriteGetObjectResponse.go +++ b/service/s3/api_op_WriteGetObjectResponse.go @@ -89,7 +89,7 @@ type WriteGetObjectResponseInput struct { // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -150,7 +150,7 @@ type WriteGetObjectResponseInput struct { ContentLanguage *string // The size of the content body in bytes. - ContentLength int64 + ContentLength *int64 // The portion of the object returned in the response. ContentRange *string @@ -160,7 +160,7 @@ type WriteGetObjectResponseInput struct { // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) // a delete marker. - DeleteMarker bool + DeleteMarker *bool // An opaque identifier assigned by a web server to a specific version of a // resource found at a URL. @@ -198,7 +198,7 @@ type WriteGetObjectResponseInput struct { // can happen if you create metadata using an API like SOAP that supports more // flexible metadata than the REST API. For example, using SOAP, you can create // metadata whose values are not legal HTTP headers. - MissingMeta int32 + MissingMeta *int32 // Indicates whether an object stored in Amazon S3 has an active legal hold. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus @@ -212,7 +212,7 @@ type WriteGetObjectResponseInput struct { ObjectLockRetainUntilDate *time.Time // The count of parts this object has. - PartsCount int32 + PartsCount *int32 // Indicates if request involves bucket that is either a source or destination in // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) @@ -262,7 +262,7 @@ type WriteGetObjectResponseInput struct { // - 416 - Range Not Satisfiable // - 500 - Internal Server Error // - 503 - Service Unavailable - StatusCode int32 + StatusCode *int32 // Provides storage class information of the object. Amazon S3 returns this header // for all objects except for S3 Standard storage class objects. For more @@ -271,7 +271,7 @@ type WriteGetObjectResponseInput struct { StorageClass types.StorageClass // The number of tags, if any, on the object. - TagCount int32 + TagCount *int32 // An ID used to reference a specific version of the object. 
VersionId *string diff --git a/service/s3/deserializers.go b/service/s3/deserializers.go index fc192a3263f..3588a8180e1 100644 --- a/service/s3/deserializers.go +++ b/service/s3/deserializers.go @@ -230,7 +230,7 @@ func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *Comple if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { @@ -516,7 +516,7 @@ func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, r if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { @@ -828,7 +828,7 @@ func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMu if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { @@ -2003,7 +2003,7 @@ func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutpu if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { @@ -5280,7 +5280,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { @@ -5329,7 +5329,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.ContentLength = vv + v.ContentLength = ptr.Int64(vv) } if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { @@ -5348,7 +5348,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { @@ -5395,7 +5395,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.MissingMeta = int32(vv) + v.MissingMeta = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { @@ -5423,7 +5423,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.PartsCount = int32(vv) + v.PartsCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { @@ -5472,7 +5472,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.TagCount = int32(vv) + v.TagCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { @@ -5776,7 +5776,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectA if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -5861,7 +5861,7 @@ func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v **GetObjectAttr if err != nil { return err } - sv.ObjectSize = i64 + sv.ObjectSize = ptr.Int64(i64) } case strings.EqualFold("StorageClass", 
t.Name.Local): @@ -6878,7 +6878,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { @@ -6927,7 +6927,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.ContentLength = vv + v.ContentLength = ptr.Int64(vv) } if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { @@ -6941,7 +6941,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { @@ -6988,7 +6988,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.MissingMeta = int32(vv) + v.MissingMeta = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { @@ -7016,7 +7016,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.PartsCount = int32(vv) + v.PartsCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { @@ -7223,7 +7223,7 @@ func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v * if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7404,7 +7404,7 @@ func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsO if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7585,7 +7585,7 @@ func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v * if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7760,7 +7760,7 @@ func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **L if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("MetricsConfiguration", t.Name.Local): @@ -8135,7 +8135,7 @@ func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipar if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("KeyMarker", t.Name.Local): @@ -8165,7 +8165,7 @@ func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipar if err != nil { return err } - sv.MaxUploads = int32(i64) + sv.MaxUploads = ptr.Int32(int32(i64)) } case strings.EqualFold("NextKeyMarker", t.Name.Local): @@ -8430,7 +8430,7 @@ func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, de if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = 
ptr.Bool(xtv) } case strings.EqualFold("Marker", t.Name.Local): @@ -8460,7 +8460,7 @@ func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, de if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -8719,7 +8719,7 @@ func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("KeyCount", t.Name.Local): @@ -8736,7 +8736,7 @@ func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return err } - sv.KeyCount = int32(i64) + sv.KeyCount = ptr.Int32(int32(i64)) } case strings.EqualFold("MaxKeys", t.Name.Local): @@ -8753,7 +8753,7 @@ func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -9009,7 +9009,7 @@ func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVers if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("KeyMarker", t.Name.Local): @@ -9039,7 +9039,7 @@ func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVers if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -9322,7 +9322,7 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -9352,7 +9352,7 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode if err != nil { return err } - sv.MaxParts = int32(i64) + sv.MaxParts = ptr.Int32(int32(i64)) } case strings.EqualFold("NextPartNumberMarker", t.Name.Local): @@ -10868,7 +10868,7 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { @@ -11701,7 +11701,7 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, r if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { @@ -11870,7 +11870,7 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyO if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { @@ -12392,7 +12392,7 @@ func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesProcessed = i64 + sv.BytesProcessed = ptr.Int64(i64) } case strings.EqualFold("BytesReturned", t.Name.Local): @@ -12409,7 +12409,7 @@ func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesReturned = i64 + sv.BytesReturned = 
ptr.Int64(i64) } case strings.EqualFold("BytesScanned", t.Name.Local): @@ -12426,7 +12426,7 @@ func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesScanned = i64 + sv.BytesScanned = ptr.Int64(i64) } default: @@ -12479,7 +12479,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesProcessed = i64 + sv.BytesProcessed = ptr.Int64(i64) } case strings.EqualFold("BytesReturned", t.Name.Local): @@ -12496,7 +12496,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesReturned = i64 + sv.BytesReturned = ptr.Int64(i64) } case strings.EqualFold("BytesScanned", t.Name.Local): @@ -12513,7 +12513,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesScanned = i64 + sv.BytesScanned = ptr.Int64(i64) } default: @@ -12639,7 +12639,7 @@ func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.Abor if err != nil { return err } - sv.DaysAfterInitiation = int32(i64) + sv.DaysAfterInitiation = ptr.Int32(int32(i64)) } default: @@ -14192,7 +14192,7 @@ func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxm if err != nil { return err } - sv.MaxAgeSeconds = int32(i64) + sv.MaxAgeSeconds = ptr.Int32(int32(i64)) } default: @@ -14313,7 +14313,7 @@ func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("Mode", t.Name.Local): @@ -14343,7 +14343,7 @@ func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, if err != nil { return err } - sv.Years = int32(i64) + sv.Years = ptr.Int32(int32(i64)) } default: @@ -14395,7 +14395,7 @@ func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decode if err != nil { return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) } - sv.DeleteMarker = xtv + sv.DeleteMarker = ptr.Bool(xtv) } case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): @@ -14554,7 +14554,7 @@ func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry if err != nil { return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) } - sv.IsLatest = xtv + sv.IsLatest = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -15500,7 +15500,7 @@ func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("MaxParts", t.Name.Local): @@ -15517,7 +15517,7 @@ func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return err } - sv.MaxParts = int32(i64) + sv.MaxParts = ptr.Int32(int32(i64)) } case strings.EqualFold("NextPartNumberMarker", t.Name.Local): @@ -15566,7 +15566,7 @@ func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return err } - sv.TotalPartsCount = int32(i64) + sv.TotalPartsCount = ptr.Int32(int32(i64)) } default: @@ -16313,7 +16313,7 @@ func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryCon if err != nil { return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) } - sv.IsEnabled = xtv + sv.IsEnabled = 
ptr.Bool(xtv) } case strings.EqualFold("OptionalFields", t.Name.Local): @@ -16967,7 +16967,7 @@ func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpira if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): @@ -16983,7 +16983,7 @@ func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpira if err != nil { return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) } - sv.ExpiredObjectDeleteMarker = xtv + sv.ExpiredObjectDeleteMarker = ptr.Bool(xtv) } default: @@ -17147,7 +17147,7 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR if err != nil { return err } - sv.ObjectSizeGreaterThan = i64 + sv.ObjectSizeGreaterThan = ptr.Int64(i64) } case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): @@ -17164,7 +17164,7 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR if err != nil { return err } - sv.ObjectSizeLessThan = i64 + sv.ObjectSizeLessThan = ptr.Int64(i64) } case strings.EqualFold("Prefix", t.Name.Local): @@ -18001,7 +18001,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.Noncurr if err != nil { return err } - sv.NewerNoncurrentVersions = int32(i64) + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) } case strings.EqualFold("NoncurrentDays", t.Name.Local): @@ -18018,7 +18018,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.Noncurr if err != nil { return err } - sv.NoncurrentDays = int32(i64) + sv.NoncurrentDays = ptr.Int32(int32(i64)) } default: @@ -18071,7 +18071,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.Noncurr if err != nil { return err } - sv.NewerNoncurrentVersions = int32(i64) + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) } case strings.EqualFold("NoncurrentDays", t.Name.Local): @@ -18088,7 +18088,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.Noncurr if err != nil { return err } - sv.NoncurrentDays = int32(i64) + sv.NoncurrentDays = ptr.Int32(int32(i64)) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -18469,7 +18469,7 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -18939,7 +18939,7 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit if err != nil { return err } - sv.PartNumber = int32(i64) + sv.PartNumber = ptr.Int32(int32(i64)) } case strings.EqualFold("Size", t.Name.Local): @@ -18956,7 +18956,7 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } default: @@ -19027,7 +19027,7 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode if err != nil { return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) } - sv.IsLatest = xtv + sv.IsLatest = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -19086,7 +19086,7 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -19536,7 +19536,7 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe if err 
!= nil { return err } - sv.PartNumber = int32(i64) + sv.PartNumber = ptr.Int32(int32(i64)) } case strings.EqualFold("Size", t.Name.Local): @@ -19553,7 +19553,7 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } default: @@ -19741,7 +19741,7 @@ func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder if err != nil { return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val) } - sv.IsPublic = xtv + sv.IsPublic = ptr.Bool(xtv) } default: @@ -19793,7 +19793,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.BlockPublicAcls = xtv + sv.BlockPublicAcls = ptr.Bool(xtv) } case strings.EqualFold("BlockPublicPolicy", t.Name.Local): @@ -19809,7 +19809,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.BlockPublicPolicy = xtv + sv.BlockPublicPolicy = ptr.Bool(xtv) } case strings.EqualFold("IgnorePublicAcls", t.Name.Local): @@ -19825,7 +19825,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.IgnorePublicAcls = xtv + sv.IgnorePublicAcls = ptr.Bool(xtv) } case strings.EqualFold("RestrictPublicBuckets", t.Name.Local): @@ -19841,7 +19841,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.RestrictPublicBuckets = xtv + sv.RestrictPublicBuckets = ptr.Bool(xtv) } default: @@ -20353,7 +20353,7 @@ func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, de if err != nil { return err } - sv.Priority = int32(i64) + sv.Priority = ptr.Int32(int32(i64)) } case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): @@ -20674,7 +20674,7 @@ func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTim if err != nil { return err } - sv.Minutes = int32(i64) + sv.Minutes = ptr.Int32(int32(i64)) } default: @@ -20726,7 +20726,7 @@ func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decode if err != nil { return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val) } - sv.IsRestoreInProgress = xtv + sv.IsRestoreInProgress = ptr.Bool(xtv) } case strings.EqualFold("RestoreExpiryDate", t.Name.Local): @@ -21063,7 +21063,7 @@ func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSide if err != nil { return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) } - sv.BucketKeyEnabled = xtv + sv.BucketKeyEnabled = ptr.Bool(xtv) } default: @@ -21729,7 +21729,7 @@ func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml. 
if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } default: @@ -22009,7 +22009,7 @@ func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smit if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("StorageClass", t.Name.Local): diff --git a/service/s3/handwritten_paginators.go b/service/s3/handwritten_paginators.go index 3d0b25100d8..02128da6ff8 100644 --- a/service/s3/handwritten_paginators.go +++ b/service/s3/handwritten_paginators.go @@ -3,6 +3,8 @@ package s3 import ( "context" "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" ) // ListObjectVersionsAPIClient is a client that implements the ListObjectVersions @@ -42,7 +44,7 @@ func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params * } options := ListObjectVersionsPaginatorOptions{} - options.Limit = params.MaxKeys + options.Limit = aws.ToInt32(params.MaxKeys) for _, fn := range optFns { fn(&options) @@ -77,7 +79,7 @@ func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...fu if p.options.Limit > 0 { limit = p.options.Limit } - params.MaxKeys = limit + params.MaxKeys = aws.Int32(limit) result, err := p.client.ListObjectVersions(ctx, ¶ms, optFns...) if err != nil { @@ -86,10 +88,10 @@ func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...fu p.firstPage = false prevToken := p.keyMarker - p.isTruncated = result.IsTruncated + p.isTruncated = aws.ToBool(result.IsTruncated) p.keyMarker = nil p.versionIDMarker = nil - if result.IsTruncated { + if aws.ToBool(result.IsTruncated) { p.keyMarker = result.NextKeyMarker p.versionIDMarker = result.NextVersionIdMarker } @@ -141,7 +143,7 @@ func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, para } options := ListMultipartUploadsPaginatorOptions{} - options.Limit = params.MaxUploads + options.Limit = aws.ToInt32(params.MaxUploads) for _, fn := range optFns { fn(&options) @@ -176,7 +178,7 @@ func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ... if p.options.Limit > 0 { limit = p.options.Limit } - params.MaxUploads = limit + params.MaxUploads = aws.Int32(limit) result, err := p.client.ListMultipartUploads(ctx, ¶ms, optFns...) if err != nil { @@ -185,10 +187,10 @@ func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ... 
p.firstPage = false prevToken := p.keyMarker - p.isTruncated = result.IsTruncated + p.isTruncated = aws.ToBool(result.IsTruncated) p.keyMarker = nil p.uploadIDMarker = nil - if result.IsTruncated { + if aws.ToBool(result.IsTruncated) { p.keyMarker = result.NextKeyMarker p.uploadIDMarker = result.NextUploadIdMarker } diff --git a/service/s3/handwritten_paginators_test.go b/service/s3/handwritten_paginators_test.go index 8e1b7ed8da1..3bf9865db26 100644 --- a/service/s3/handwritten_paginators_test.go +++ b/service/s3/handwritten_paginators_test.go @@ -2,8 +2,9 @@ package s3 import ( "context" - "github.com/aws/aws-sdk-go-v2/aws" "testing" + + "github.com/aws/aws-sdk-go-v2/aws" ) type mockListObjectVersionsClient struct { @@ -21,14 +22,14 @@ type mockListMultipartUploadsClient struct { func (c *mockListObjectVersionsClient) ListObjectVersions(ctx context.Context, input *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { c.inputs = append(c.inputs, input) requestCnt := len(c.inputs) - testCurRequest(len(c.outputs), requestCnt, c.outputs[requestCnt-1].MaxKeys, input.MaxKeys, c.t) + testCurRequest(len(c.outputs), requestCnt, aws.ToInt32(c.outputs[requestCnt-1].MaxKeys), aws.ToInt32(input.MaxKeys), c.t) return c.outputs[requestCnt-1], nil } func (c *mockListMultipartUploadsClient) ListMultipartUploads(ctx context.Context, input *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { c.inputs = append(c.inputs, input) requestCnt := len(c.inputs) - testCurRequest(len(c.outputs), requestCnt, c.outputs[requestCnt-1].MaxUploads, input.MaxUploads, c.t) + testCurRequest(len(c.outputs), requestCnt, aws.ToInt32(c.outputs[requestCnt-1].MaxUploads), aws.ToInt32(input.MaxUploads), c.t) return c.outputs[requestCnt-1], nil } @@ -61,20 +62,20 @@ func TestListObjectVersionsPaginator(t *testing.T) { { NextKeyMarker: aws.String("testKey1"), NextVersionIdMarker: aws.String("testID1"), - MaxKeys: 5, - IsTruncated: true, + MaxKeys: aws.Int32(5), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), NextVersionIdMarker: aws.String("testID2"), - MaxKeys: 5, - IsTruncated: true, + MaxKeys: aws.Int32(5), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey3"), NextVersionIdMarker: aws.String("testID3"), - MaxKeys: 5, - IsTruncated: false, + MaxKeys: aws.Int32(5), + IsTruncated: aws.Bool(false), }, }, }, @@ -89,26 +90,26 @@ func TestListObjectVersionsPaginator(t *testing.T) { { NextKeyMarker: aws.String("testKey1"), NextVersionIdMarker: aws.String("testID1"), - MaxKeys: 10, - IsTruncated: true, + MaxKeys: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), NextVersionIdMarker: aws.String("testID2"), - MaxKeys: 10, - IsTruncated: true, + MaxKeys: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), NextVersionIdMarker: aws.String("testID2"), - MaxKeys: 10, - IsTruncated: true, + MaxKeys: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey3"), NextVersionIdMarker: aws.String("testID3"), - MaxKeys: 10, - IsTruncated: false, + MaxKeys: aws.Int32(10), + IsTruncated: aws.Bool(false), }, }, }, @@ -163,26 +164,26 @@ func TestListMultipartUploadsPaginator(t *testing.T) { { NextKeyMarker: aws.String("testKey1"), NextUploadIdMarker: aws.String("testID1"), - MaxUploads: 5, - IsTruncated: true, + MaxUploads: aws.Int32(5), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), 
NextUploadIdMarker: aws.String("testID2"), - MaxUploads: 5, - IsTruncated: true, + MaxUploads: aws.Int32(5), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey3"), NextUploadIdMarker: aws.String("testID3"), - MaxUploads: 5, - IsTruncated: true, + MaxUploads: aws.Int32(5), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey4"), NextUploadIdMarker: aws.String("testID4"), - MaxUploads: 5, - IsTruncated: false, + MaxUploads: aws.Int32(5), + IsTruncated: aws.Bool(false), }, }, }, @@ -197,32 +198,32 @@ func TestListMultipartUploadsPaginator(t *testing.T) { { NextKeyMarker: aws.String("testKey1"), NextUploadIdMarker: aws.String("testID1"), - MaxUploads: 10, - IsTruncated: true, + MaxUploads: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), NextUploadIdMarker: aws.String("testID2"), - MaxUploads: 10, - IsTruncated: true, + MaxUploads: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey2"), NextUploadIdMarker: aws.String("testID2"), - MaxUploads: 10, - IsTruncated: true, + MaxUploads: aws.Int32(10), + IsTruncated: aws.Bool(true), }, { NextKeyMarker: aws.String("testKey4"), NextUploadIdMarker: aws.String("testID4"), - MaxUploads: 10, - IsTruncated: false, + MaxUploads: aws.Int32(10), + IsTruncated: aws.Bool(false), }, { NextKeyMarker: aws.String("testKey5"), NextUploadIdMarker: aws.String("testID5"), - MaxUploads: 10, - IsTruncated: false, + MaxUploads: aws.Int32(10), + IsTruncated: aws.Bool(false), }, }, }, diff --git a/service/s3/internal/customizations/presign_test.go b/service/s3/internal/customizations/presign_test.go index c2e23f5aea9..fb689cba3ba 100644 --- a/service/s3/internal/customizations/presign_test.go +++ b/service/s3/internal/customizations/presign_test.go @@ -130,7 +130,7 @@ func TestPutObject_PresignURL(t *testing.T) { input: s3.PutObjectInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - ContentLength: 100, + ContentLength: aws.Int64(100), }, expectPresignedURLHost: "https://mock-bucket.s3.us-west-2.amazonaws.com/mockkey?", expectRequestURIQuery: []string{ @@ -299,7 +299,7 @@ func TestUploadPart_PresignURL(t *testing.T) { input: s3.UploadPartInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: aws.String("123456"), Body: strings.NewReader("hello-world"), }, @@ -324,7 +324,7 @@ func TestUploadPart_PresignURL(t *testing.T) { input: s3.UploadPartInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: aws.String("123456"), Body: bytes.NewReader([]byte("hello-world")), }, @@ -350,7 +350,7 @@ func TestUploadPart_PresignURL(t *testing.T) { input: s3.UploadPartInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: aws.String("123456"), Body: bytes.NewBuffer([]byte(`hello-world`)), }, @@ -375,7 +375,7 @@ func TestUploadPart_PresignURL(t *testing.T) { input: s3.UploadPartInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: aws.String("123456"), Body: bytes.NewReader([]byte(``)), }, @@ -398,7 +398,7 @@ func TestUploadPart_PresignURL(t *testing.T) { input: s3.UploadPartInput{ Bucket: aws.String("mock-bucket"), Key: aws.String("mockkey"), - PartNumber: 1, + PartNumber: aws.Int32(1), UploadId: aws.String("123456"), }, expectPresignedURLHost: 
"https://mock-bucket.s3.us-west-2.amazonaws.com/mockkey?", diff --git a/service/s3/internal/customizations/write_get_object_response_test.go b/service/s3/internal/customizations/write_get_object_response_test.go index 3dfe256ef08..fb5de85d4dc 100644 --- a/service/s3/internal/customizations/write_get_object_response_test.go +++ b/service/s3/internal/customizations/write_get_object_response_test.go @@ -5,13 +5,14 @@ import ( "context" "crypto/tls" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/google/go-cmp/cmp" "io/ioutil" "net/http" "net/http/httptest" "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/google/go-cmp/cmp" ) type readSeeker struct { @@ -152,7 +153,7 @@ func TestWriteGetObjectResponse(t *testing.T) { RequestRoute: aws.String("route"), RequestToken: aws.String("token"), Body: &readOnlyReader{bytes.NewReader([]byte("test input"))}, - ContentLength: 10, + ContentLength: aws.Int64(10), }, }, "Content-Length Not Provided": { diff --git a/service/s3/serializers.go b/service/s3/serializers.go index 463f6b706f6..9f010166c60 100644 --- a/service/s3/serializers.go +++ b/service/s3/serializers.go @@ -293,9 +293,9 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -606,9 +606,9 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(*v.GrantWriteACP) } - if v.ObjectLockEnabledForBucket { + if v.ObjectLockEnabledForBucket != nil { locationName := "X-Amz-Bucket-Object-Lock-Enabled" - encoder.SetHeader(locationName).Boolean(v.ObjectLockEnabledForBucket) + encoder.SetHeader(locationName).Boolean(*v.ObjectLockEnabledForBucket) } if len(v.ObjectOwnership) > 0 { @@ -677,9 +677,9 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -1685,9 +1685,9 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.BypassGovernanceRetention { + if v.BypassGovernanceRetention != nil { locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { @@ -1798,9 +1798,9 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.BypassGovernanceRetention { + if v.BypassGovernanceRetention != nil { locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + 
encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } if len(v.ChecksumAlgorithm) > 0 { @@ -3289,8 +3289,8 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder } } - if v.PartNumber != 0 { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if v.Range != nil && len(*v.Range) > 0 { @@ -3495,9 +3495,9 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr } } - if v.MaxParts != 0 { + if v.MaxParts != nil { locationName := "X-Amz-Max-Parts" - encoder.SetHeader(locationName).Integer(v.MaxParts) + encoder.SetHeader(locationName).Integer(*v.MaxParts) } if v.ObjectAttributes != nil { @@ -4133,8 +4133,8 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod } } - if v.PartNumber != 0 { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if v.Range != nil && len(*v.Range) > 0 { @@ -4546,8 +4546,8 @@ func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipar encoder.SetQuery("key-marker").String(*v.KeyMarker) } - if v.MaxUploads != 0 { - encoder.SetQuery("max-uploads").Integer(v.MaxUploads) + if v.MaxUploads != nil { + encoder.SetQuery("max-uploads").Integer(*v.MaxUploads) } if v.Prefix != nil { @@ -4636,8 +4636,8 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc encoder.SetQuery("marker").String(*v.Marker) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) } if v.OptionalObjectAttributes != nil { @@ -4736,12 +4736,12 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.FetchOwner { - encoder.SetQuery("fetch-owner").Boolean(v.FetchOwner) + if v.FetchOwner != nil { + encoder.SetQuery("fetch-owner").Boolean(*v.FetchOwner) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) } if v.OptionalObjectAttributes != nil { @@ -4844,8 +4844,8 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers encoder.SetQuery("key-marker").String(*v.KeyMarker) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) } if v.OptionalObjectAttributes != nil { @@ -4945,8 +4945,8 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder } } - if v.MaxParts != 0 { - encoder.SetQuery("max-parts").Integer(v.MaxParts) + if v.MaxParts != nil { + encoder.SetQuery("max-parts").Integer(*v.MaxParts) } if v.PartNumberMarker != nil { @@ -6003,9 +6003,9 @@ func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.SkipDestinationValidation { + if v.SkipDestinationValidation != nil { locationName := "X-Amz-Skip-Destination-Validation" - encoder.SetHeader(locationName).Boolean(v.SkipDestinationValidation) + encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation) } return nil @@ -6171,9 +6171,9 @@ func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyIn encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if 
v.ConfirmRemoveSelfBucketAccess { + if v.ConfirmRemoveSelfBucketAccess != nil { locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" - encoder.SetHeader(locationName).Boolean(v.ConfirmRemoveSelfBucketAccess) + encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -6744,9 +6744,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -6794,9 +6794,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentLength != 0 { + if v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -7363,9 +7363,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectReten return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.BypassGovernanceRetention { + if v.BypassGovernanceRetention != nil { locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } if len(v.ChecksumAlgorithm) > 0 { @@ -7992,9 +7992,9 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentLength != 0 { + if v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -8016,8 +8016,8 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod } } - { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if len(v.RequestPayer) > 0 { @@ -8164,8 +8164,8 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu } } - { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if len(v.RequestPayer) > 0 { @@ -8265,9 +8265,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.AcceptRanges) } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -8310,9 +8310,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentLength != 0 { + if v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentRange 
!= nil && len(*v.ContentRange) > 0 { @@ -8325,9 +8325,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.ContentType) } - if v.DeleteMarker { + if v.DeleteMarker != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" - encoder.SetHeader(locationName).Boolean(v.DeleteMarker) + encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) } if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { @@ -8369,9 +8369,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb } } - if v.MissingMeta != 0 { + if v.MissingMeta != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" - encoder.SetHeader(locationName).Integer(v.MissingMeta) + encoder.SetHeader(locationName).Integer(*v.MissingMeta) } if len(v.ObjectLockLegalHoldStatus) > 0 { @@ -8389,9 +8389,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) } - if v.PartsCount != 0 { + if v.PartsCount != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" - encoder.SetHeader(locationName).Integer(v.PartsCount) + encoder.SetHeader(locationName).Integer(*v.PartsCount) } if len(v.ReplicationStatus) > 0 { @@ -8439,9 +8439,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } - if v.StatusCode != 0 { + if v.StatusCode != nil { locationName := "X-Amz-Fwd-Status" - encoder.SetHeader(locationName).Integer(v.StatusCode) + encoder.SetHeader(locationName).Integer(*v.StatusCode) } if len(v.StorageClass) > 0 { @@ -8449,9 +8449,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.TagCount != 0 { + if v.TagCount != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" - encoder.SetHeader(locationName).Integer(v.TagCount) + encoder.SetHeader(locationName).Integer(*v.TagCount) } if v.VersionId != nil && len(*v.VersionId) > 0 { @@ -8464,7 +8464,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { defer value.Close() - if v.DaysAfterInitiation != 0 { + if v.DaysAfterInitiation != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -8473,7 +8473,7 @@ func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIn Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.DaysAfterInitiation) + el.Integer(*v.DaysAfterInitiation) } return nil } @@ -8876,7 +8876,7 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi el := value.MemberElement(root) el.String(*v.ETag) } - if v.PartNumber != 0 { + if v.PartNumber != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -8885,7 +8885,7 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.PartNumber) + el.Integer(*v.PartNumber) } return nil } @@ -9015,7 +9015,7 @@ func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Val el := value.MemberElement(root) el.String(*v.ID) } - if v.MaxAgeSeconds != 0 { + if v.MaxAgeSeconds != nil { rootAttr := []smithyxml.Attr{} root := 
smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9024,7 +9024,7 @@ func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.MaxAgeSeconds) + el.Integer(*v.MaxAgeSeconds) } return nil } @@ -9062,7 +9062,7 @@ func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucket func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error { defer value.Close() - if v.AllowQuotedRecordDelimiter { + if v.AllowQuotedRecordDelimiter != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9071,7 +9071,7 @@ func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Val Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.AllowQuotedRecordDelimiter) + el.Boolean(*v.AllowQuotedRecordDelimiter) } if v.Comments != nil { rootAttr := []smithyxml.Attr{} @@ -9204,7 +9204,7 @@ func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.V func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error { defer value.Close() - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9213,7 +9213,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if len(v.Mode) > 0 { rootAttr := []smithyxml.Attr{} @@ -9226,7 +9226,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val el := value.MemberElement(root) el.String(string(v.Mode)) } - if v.Years != 0 { + if v.Years != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9235,7 +9235,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Years) + el.Integer(*v.Years) } return nil } @@ -9255,7 +9255,7 @@ func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) return err } } - if v.Quiet { + if v.Quiet != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9264,7 +9264,7 @@ func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.Quiet) + el.Boolean(*v.Quiet) } return nil } @@ -9901,7 +9901,7 @@ func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfig el := value.MemberElement(root) el.String(string(v.IncludedObjectVersions)) } - { + if v.IsEnabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9910,7 +9910,7 @@ func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfig Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.IsEnabled) + el.Boolean(*v.IsEnabled) } if v.OptionalFields != nil { rootAttr := []smithyxml.Attr{} @@ -10217,7 +10217,7 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio el := value.MemberElement(root) el.String(smithytime.FormatDateTime(*v.Date)) } - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10226,9 +10226,9 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio Attr: rootAttr, } el := value.MemberElement(root) - 
el.Integer(v.Days) + el.Integer(*v.Days) } - if v.ExpiredObjectDeleteMarker { + if v.ExpiredObjectDeleteMarker != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10237,7 +10237,7 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.ExpiredObjectDeleteMarker) + el.Boolean(*v.ExpiredObjectDeleteMarker) } return nil } @@ -10360,7 +10360,7 @@ func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smi func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { defer value.Close() - if v.ObjectSizeGreaterThan != 0 { + if v.ObjectSizeGreaterThan != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10369,9 +10369,9 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.ObjectSizeGreaterThan) + el.Long(*v.ObjectSizeGreaterThan) } - if v.ObjectSizeLessThan != 0 { + if v.ObjectSizeLessThan != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10380,7 +10380,7 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.ObjectSizeLessThan) + el.Long(*v.ObjectSizeLessThan) } if v.Prefix != nil { rootAttr := []smithyxml.Attr{} @@ -10718,7 +10718,7 @@ func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smit func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error { defer value.Close() - if v.NewerNoncurrentVersions != 0 { + if v.NewerNoncurrentVersions != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10727,9 +10727,9 @@ func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NewerNoncurrentVersions) + el.Integer(*v.NewerNoncurrentVersions) } - if v.NoncurrentDays != 0 { + if v.NoncurrentDays != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10738,14 +10738,14 @@ func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NoncurrentDays) + el.Integer(*v.NoncurrentDays) } return nil } func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error { defer value.Close() - if v.NewerNoncurrentVersions != 0 { + if v.NewerNoncurrentVersions != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10754,9 +10754,9 @@ func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NewerNoncurrentVersions) + el.Integer(*v.NewerNoncurrentVersions) } - if v.NoncurrentDays != 0 { + if v.NoncurrentDays != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10765,7 +10765,7 @@ func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NoncurrentDays) + el.Integer(*v.NoncurrentDays) } if len(v.StorageClass) > 0 { rootAttr := []smithyxml.Attr{} @@ -11135,7 
+11135,7 @@ func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smith func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error { defer value.Close() - if v.BlockPublicAcls { + if v.BlockPublicAcls != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11144,9 +11144,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BlockPublicAcls) + el.Boolean(*v.BlockPublicAcls) } - if v.BlockPublicPolicy { + if v.BlockPublicPolicy != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11155,9 +11155,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BlockPublicPolicy) + el.Boolean(*v.BlockPublicPolicy) } - if v.IgnorePublicAcls { + if v.IgnorePublicAcls != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11166,9 +11166,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.IgnorePublicAcls) + el.Boolean(*v.IgnorePublicAcls) } - if v.RestrictPublicBuckets { + if v.RestrictPublicBuckets != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11177,7 +11177,7 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.RestrictPublicBuckets) + el.Boolean(*v.RestrictPublicBuckets) } return nil } @@ -11458,7 +11458,7 @@ func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value el := value.MemberElement(root) el.String(*v.Prefix) } - if v.Priority != 0 { + if v.Priority != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11467,7 +11467,7 @@ func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Priority) + el.Integer(*v.Priority) } if v.SourceSelectionCriteria != nil { rootAttr := []smithyxml.Attr{} @@ -11618,7 +11618,7 @@ func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error { defer value.Close() - if v.Minutes != 0 { + if v.Minutes != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11627,7 +11627,7 @@ func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeVa Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Minutes) + el.Integer(*v.Minutes) } return nil } @@ -11650,7 +11650,7 @@ func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPay func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error { defer value.Close() - if v.Enabled { + if v.Enabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11659,14 +11659,14 @@ func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.Enabled) + el.Boolean(*v.Enabled) } return nil } func awsRestxml_serializeDocumentRestoreRequest(v 
*types.RestoreRequest, value smithyxml.Value) error { defer value.Close() - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11675,7 +11675,7 @@ func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value s Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if v.Description != nil { rootAttr := []smithyxml.Attr{} @@ -11926,7 +11926,7 @@ func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { defer value.Close() - if v.End != 0 { + if v.End != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11935,9 +11935,9 @@ func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.V Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.End) + el.Long(*v.End) } - if v.Start != 0 { + if v.Start != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11946,7 +11946,7 @@ func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.V Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.Start) + el.Long(*v.Start) } return nil } @@ -12064,7 +12064,7 @@ func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEnc return err } } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12073,7 +12073,7 @@ func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEnc Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BucketKeyEnabled) + el.Boolean(*v.BucketKeyEnabled) } return nil } @@ -12345,7 +12345,7 @@ func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value el := value.MemberElement(root) el.String(string(v.AccessTier)) } - { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12354,7 +12354,7 @@ func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } return nil } @@ -12455,7 +12455,7 @@ func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml el := value.MemberElement(root) el.String(smithytime.FormatDateTime(*v.Date)) } - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12464,7 +12464,7 @@ func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if len(v.StorageClass) > 0 { rootAttr := []smithyxml.Attr{} diff --git a/service/s3/types/types.go b/service/s3/types/types.go index 3ddea0a2450..cc006dd4d58 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -16,7 +16,7 @@ type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete // multipart upload. - DaysAfterInitiation int32 + DaysAfterInitiation *int32 noSmithyDocumentSerde } @@ -305,7 +305,7 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. 
- PartNumber int32 + PartNumber *int32 noSmithyDocumentSerde } @@ -466,7 +466,7 @@ type CORSRule struct { // The time in seconds that your browser is to cache the preflight response for // the specified resource. - MaxAgeSeconds int32 + MaxAgeSeconds *int32 noSmithyDocumentSerde } @@ -488,7 +488,7 @@ type CSVInput struct { // Specifies that CSV field values may contain quoted record delimiters and such // records should be allowed. Default value is FALSE. Setting this value to TRUE // may lower performance. - AllowQuotedRecordDelimiter bool + AllowQuotedRecordDelimiter *bool // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to @@ -563,7 +563,7 @@ type DefaultRetention struct { // The number of days that you want to specify for the default retention period. // Must be used with Mode . - Days int32 + Days *int32 // The default Object Lock retention mode you want to apply to new objects placed // in the specified bucket. Must be used with either Days or Years . @@ -571,7 +571,7 @@ type DefaultRetention struct { // The number of years that you want to specify for the default retention period. // Must be used with Mode . - Years int32 + Years *int32 noSmithyDocumentSerde } @@ -586,7 +586,7 @@ type Delete struct { // Element to enable quiet mode for the request. When you add this element, you // must set its value to true. - Quiet bool + Quiet *bool noSmithyDocumentSerde } @@ -598,7 +598,7 @@ type DeletedObject struct { // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the // object is a delete marker. - DeleteMarker bool + DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. If you delete a specific object version, the value returned by this @@ -619,7 +619,7 @@ type DeleteMarkerEntry struct { // Specifies whether the object is (true) or is not (false) the latest version of // an object. - IsLatest bool + IsLatest *bool // The object key. Key *string @@ -1240,10 +1240,10 @@ type GetObjectAttributesParts struct { // Indicates whether the returned list of parts is truncated. A value of true // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. - IsTruncated bool + IsTruncated *bool // The maximum number of parts allowed in the response. - MaxParts int32 + MaxParts *int32 // When a list is truncated, this element specifies the last part in the list, as // well as the value to use for the PartNumberMarker request parameter in a @@ -1258,7 +1258,7 @@ type GetObjectAttributesParts struct { Parts []ObjectPart // The total number of parts. - TotalPartsCount int32 + TotalPartsCount *int32 noSmithyDocumentSerde } @@ -1463,7 +1463,7 @@ type InventoryConfiguration struct { // inventory list is generated. If set to False , no inventory list is generated. // // This member is required. - IsEnabled bool + IsEnabled *bool // Specifies the schedule for generating inventory results. // @@ -1617,13 +1617,13 @@ type LifecycleExpiration struct { // Indicates the lifetime, in days, of the objects that are subject to the rule. // The value must be a non-zero positive integer. - Days int32 + Days *int32 // Indicates whether Amazon S3 will remove a delete marker with no noncurrent // versions. 
If set to true, the delete marker will be expired; if set to false the // policy takes no action. This cannot be specified with Days or Date in a // Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker bool + ExpiredObjectDeleteMarker *bool noSmithyDocumentSerde } @@ -1693,10 +1693,10 @@ type LifecycleRule struct { type LifecycleRuleAndOperator struct { // Minimum object size to which the rule applies. - ObjectSizeGreaterThan int64 + ObjectSizeGreaterThan *int64 // Maximum object size to which the rule applies. - ObjectSizeLessThan int64 + ObjectSizeLessThan *int64 // Prefix identifying one or more objects to which the rule applies. Prefix *string @@ -1964,14 +1964,14 @@ type NoncurrentVersionExpiration struct { // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. - NewerNoncurrentVersions int32 + NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. The value must be a non-zero positive integer. // For information about the noncurrent days calculations, see How Amazon S3 // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon S3 User Guide. - NoncurrentDays int32 + NoncurrentDays *int32 noSmithyDocumentSerde } @@ -1990,14 +1990,14 @@ type NoncurrentVersionTransition struct { // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. - NewerNoncurrentVersions int32 + NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates How Long an Object Has Been // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon S3 User Guide. - NoncurrentDays int32 + NoncurrentDays *int32 // The class of storage used to store the object. StorageClass TransitionStorageClass @@ -2079,7 +2079,7 @@ type Object struct { RestoreStatus *RestoreStatus // Size in bytes of the object - Size int64 + Size *int64 // The class of storage used to store the object. StorageClass ObjectStorageClass @@ -2186,10 +2186,10 @@ type ObjectPart struct { // The part number identifying the part. This value is a positive integer between // 1 and 10,000. - PartNumber int32 + PartNumber *int32 // The size of the uploaded part in bytes. - Size int64 + Size *int64 noSmithyDocumentSerde } @@ -2205,7 +2205,7 @@ type ObjectVersion struct { // Specifies whether the object is (true) or is not (false) the latest version of // an object. - IsLatest bool + IsLatest *bool // The object key. Key *string @@ -2224,7 +2224,7 @@ type ObjectVersion struct { RestoreStatus *RestoreStatus // Size in bytes of the object. - Size int64 + Size *int64 // The class of storage used to store the object. StorageClass ObjectVersionStorageClass @@ -2353,10 +2353,10 @@ type Part struct { // Part number identifying the part. This is a positive integer between 1 and // 10,000. - PartNumber int32 + PartNumber *int32 // Size in bytes of the uploaded part data. 
- Size int64 + Size *int64 noSmithyDocumentSerde } @@ -2366,7 +2366,7 @@ type PolicyStatus struct { // The policy status for this bucket. TRUE indicates that this bucket is public. // FALSE indicates that the bucket is not public. - IsPublic bool + IsPublic *bool noSmithyDocumentSerde } @@ -2375,13 +2375,13 @@ type PolicyStatus struct { type Progress struct { // The current number of uncompressed object bytes processed. - BytesProcessed int64 + BytesProcessed *int64 // The current number of bytes of records payload data returned. - BytesReturned int64 + BytesReturned *int64 // The current number of object bytes scanned. - BytesScanned int64 + BytesScanned *int64 noSmithyDocumentSerde } @@ -2410,20 +2410,20 @@ type PublicAccessBlockConfiguration struct { // - PUT Object calls fail if the request includes a public ACL. // - PUT Bucket calls fail if the request includes a public ACL. // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls bool + BlockPublicAcls *bool // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT // Bucket policy if the specified bucket policy allows public access. Enabling this // setting doesn't affect existing bucket policies. - BlockPublicPolicy bool + BlockPublicPolicy *bool // Specifies whether Amazon S3 should ignore public ACLs for this bucket and // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore // all public ACLs on this bucket and objects in this bucket. Enabling this setting // doesn't affect the persistence of any existing ACLs and doesn't prevent new // public ACLs from being set. - IgnorePublicAcls bool + IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only @@ -2432,7 +2432,7 @@ type PublicAccessBlockConfiguration struct { // stored bucket policies, except that public and cross-account access within any // public bucket policy, including non-public delegation to specific accounts, is // blocked. - RestrictPublicBuckets bool + RestrictPublicBuckets *bool noSmithyDocumentSerde } @@ -2622,7 +2622,7 @@ type ReplicationRule struct { // rule with the highest priority. The higher the number, the higher the priority. // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 User Guide. - Priority int32 + Priority *int32 // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the @@ -2729,7 +2729,7 @@ type ReplicationTime struct { type ReplicationTimeValue struct { // Contains an integer specifying time in minutes. Valid value: 15 - Minutes int32 + Minutes *int32 noSmithyDocumentSerde } @@ -2750,7 +2750,7 @@ type RequestProgress struct { // Specifies whether periodic QueryProgress frames should be sent. Valid values: // TRUE, FALSE. Default value: FALSE. - Enabled bool + Enabled *bool noSmithyDocumentSerde } @@ -2761,7 +2761,7 @@ type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify // OutputLocation . The Days element is required for regular restores, and must not // be provided for select requests. - Days int32 + Days *int32 // The optional description for the job. 
Description *string @@ -2799,7 +2799,7 @@ type RestoreStatus struct { // x-amz-optional-object-attributes: IsRestoreInProgress="false", // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, // there is no header response. - IsRestoreInProgress bool + IsRestoreInProgress *bool // Indicates when the restored copy will expire. This value is populated only if // the object has already been restored. For example: @@ -2886,13 +2886,13 @@ type ScanRange struct { // non-negative integers. The default value is one less than the size of the object // being queried. If only the End parameter is supplied, it is interpreted to mean // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. - End int64 + End *int64 // Specifies the start of the byte range. This parameter is optional. Valid // values: non-negative integers. The default value is 0. If only start is // supplied, it means scan from that point to the end of the file. For example, 50 // means scan from byte 50 until the end of the file. - Start int64 + Start *int64 noSmithyDocumentSerde } @@ -3042,7 +3042,7 @@ type ServerSideEncryptionRule struct { // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) // in the Amazon S3 User Guide. - BucketKeyEnabled bool + BucketKeyEnabled *bool noSmithyDocumentSerde } @@ -3106,13 +3106,13 @@ type SSES3 struct { type Stats struct { // The total number of uncompressed object bytes processed. - BytesProcessed int64 + BytesProcessed *int64 // The total number of bytes of records payload data returned. - BytesReturned int64 + BytesReturned *int64 // The total number of object bytes scanned. - BytesScanned int64 + BytesScanned *int64 noSmithyDocumentSerde } @@ -3215,7 +3215,7 @@ type Tiering struct { // days). // // This member is required. - Days int32 + Days *int32 noSmithyDocumentSerde } @@ -3262,7 +3262,7 @@ type Transition struct { // Indicates the number of days after creation when objects are transitioned to // the specified storage class. The value must be a positive integer. - Days int32 + Days *int32 // The storage class to which you want the object to transition. 
StorageClass TransitionStorageClass diff --git a/service/s3/types/types_exported_test.go b/service/s3/types/types_exported_test.go index 9fa7e2c355f..1d0c63dd2e7 100644 --- a/service/s3/types/types_exported_test.go +++ b/service/s3/types/types_exported_test.go @@ -63,8 +63,8 @@ func ExampleLifecycleRuleFilter_outputUsage() { var _ *string var _ *types.LifecycleRuleAndOperator -var _ int64 -var _ int64 +var _ *int64 +var _ *int64 var _ *types.Tag func ExampleMetricsFilter_outputUsage() { diff --git a/service/s3/validators.go b/service/s3/validators.go index ccd845a71e9..ac560f7df4f 100644 --- a/service/s3/validators.go +++ b/service/s3/validators.go @@ -2699,6 +2699,9 @@ func validateInventoryConfiguration(v *types.InventoryConfiguration) error { invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) } } + if v.IsEnabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("IsEnabled")) + } if v.Filter != nil { if err := validateInventoryFilter(v.Filter); err != nil { invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) @@ -3735,6 +3738,9 @@ func validateTiering(v *types.Tiering) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "Tiering"} + if v.Days == nil { + invalidParams.Add(smithy.NewErrParamRequired("Days")) + } if len(v.AccessTier) == 0 { invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) } @@ -5444,6 +5450,9 @@ func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { if v.Key == nil { invalidParams.Add(smithy.NewErrParamRequired("Key")) } + if v.PartNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } if v.UploadId == nil { invalidParams.Add(smithy.NewErrParamRequired("UploadId")) } @@ -5465,6 +5474,9 @@ func validateOpUploadPartInput(v *UploadPartInput) error { if v.Key == nil { invalidParams.Add(smithy.NewErrParamRequired("Key")) } + if v.PartNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } if v.UploadId == nil { invalidParams.Add(smithy.NewErrParamRequired("UploadId")) }
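
The hunks above convert a large set of scalar fields (PartNumber, Size, Days, IsLatest, the PublicAccessBlock booleans, and so on) to pointer types and add client-side required checks for InventoryConfiguration.IsEnabled, Tiering.Days, and PartNumber on UploadPart/UploadPartCopy. The sketch below is not part of the patch; it is a minimal, hypothetical example (placeholder bucket, key, and upload ID) of how calling code adapts, setting the now-pointer inputs with the aws.String/aws.Int32 helpers and reading pointer outputs with the aws.ToXxx conversions:

    package main

    import (
        "context"
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            panic(err)
        }
        client := s3.NewFromConfig(cfg)

        // PartNumber is now *int32 and validated as required: wrap the
        // literal with aws.Int32 instead of assigning a bare int32.
        _, err = client.UploadPart(context.TODO(), &s3.UploadPartInput{
            Bucket:     aws.String("example-bucket"),    // placeholder
            Key:        aws.String("example-key"),       // placeholder
            UploadId:   aws.String("example-upload-id"), // placeholder
            PartNumber: aws.Int32(1),
            Body:       strings.NewReader("part data"),
        })
        if err != nil {
            panic(err)
        }

        // Scalar response fields such as types.Object.Size are now pointers;
        // the aws.ToXxx helpers dereference them with a zero-value fallback.
        out, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
            Bucket: aws.String("example-bucket"), // placeholder
        })
        if err != nil {
            panic(err)
        }
        for _, obj := range out.Contents {
            fmt.Println(aws.ToString(obj.Key), aws.ToInt64(obj.Size))
        }
    }

With the added validators, omitting PartNumber (or Tiering.Days / InventoryConfiguration.IsEnabled) now fails locally with a smithy.InvalidParamsError instead of silently sending a zero value on the wire.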