add vendor

This commit is contained in:
Malar Invention
2022-04-03 09:37:16 +05:30
parent f96ba5f172
commit 00ebcd295e
2339 changed files with 705854 additions and 0 deletions

24952 vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored Normal file

File diff suppressed because it is too large.


@@ -0,0 +1,249 @@
package s3
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
const (
contentMD5Header = "Content-Md5"
contentSha256Header = "X-Amz-Content-Sha256"
amzTeHeader = "X-Amz-Te"
amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
appendMD5TxEncoding = "append-md5"
)
// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
h := md5.New()
if !aws.IsReaderSeekable(r.Body) {
if r.Config.Logger != nil {
r.Config.Logger.Log(fmt.Sprintf(
"Unable to compute Content-MD5 for unseekable body, S3.%s",
r.Operation.Name))
}
return
}
if _, err := copySeekableBody(h, r.Body); err != nil {
r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
return
}
// encode the md5 checksum in base64 and set the request header.
v := base64.StdEncoding.EncodeToString(h.Sum(nil))
r.HTTPRequest.Header.Set(contentMD5Header, v)
}
// computeBodyHashes will add Content-MD5 and Content-SHA256 hashes to the
// request. If the body is not seekable, or S3DisableContentMD5Validation is
// set, this handler will be ignored.
func computeBodyHashes(r *request.Request) {
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
return
}
if r.IsPresigned() {
return
}
if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
return
}
var md5Hash, sha256Hash hash.Hash
hashers := make([]io.Writer, 0, 2)
// Determine upfront which hashes can be set without overriding
// user-provided header data.
if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
md5Hash = md5.New()
hashers = append(hashers, md5Hash)
}
if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
sha256Hash = sha256.New()
hashers = append(hashers, sha256Hash)
}
// Create the destination writer based on the hashes that are not already
// provided by the user.
var dst io.Writer
switch len(hashers) {
case 0:
return
case 1:
dst = hashers[0]
default:
dst = io.MultiWriter(hashers...)
}
if _, err := copySeekableBody(dst, r.Body); err != nil {
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
return
}
// For the hashes created, set the associated headers that the user did not
// already provide.
if md5Hash != nil {
sum := make([]byte, md5.Size)
encoded := make([]byte, md5Base64EncLen)
base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
}
if sha256Hash != nil {
encoded := make([]byte, sha256HexEncLen)
sum := make([]byte, sha256.Size)
hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
}
}
const (
md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
)
func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
curPos, err := src.Seek(0, sdkio.SeekCurrent)
if err != nil {
return 0, err
}
// hash the body. seek back to the first position after reading to reset
// the body for transmission. copy errors may be assumed to be from the
// body.
n, err := io.Copy(dst, src)
if err != nil {
return n, err
}
_, err = src.Seek(curPos, sdkio.SeekStart)
if err != nil {
return n, err
}
return n, nil
}
// askForTxEncodingAppendMD5 adds the X-Amz-Te: append-md5 header to the request.
// This asks the service to respond with a trailing MD5 checksum.
//
// Will not ask for the appended MD5 if validation is disabled, the request is
// presigned, or the API operation does not support content MD5 validation.
func askForTxEncodingAppendMD5(r *request.Request) {
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
return
}
if r.IsPresigned() {
return
}
r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
}
func useMD5ValidationReader(r *request.Request) {
if r.Error != nil {
return
}
if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
return
}
var bodyReader *io.ReadCloser
var contentLen int64
switch tv := r.Data.(type) {
case *GetObjectOutput:
bodyReader = &tv.Body
contentLen = aws.Int64Value(tv.ContentLength)
// Update ContentLength to hide the trailing MD5 checksum.
tv.ContentLength = aws.Int64(contentLen - md5.Size)
tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
default:
r.Error = awserr.New("ChecksumValidationError",
fmt.Sprintf("%s: %s header received on unsupported API, %s",
amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
), nil)
return
}
if contentLen < md5.Size {
r.Error = awserr.New("ChecksumValidationError",
fmt.Sprintf("invalid Content-Length %d for %s %s",
contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
), nil)
return
}
// Wrap and swap the response body reader with the validation reader.
*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
}
type md5ValidationReader struct {
rawReader io.ReadCloser
payload io.Reader
hash hash.Hash
payloadLen int64
read int64
}
func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
h := md5.New()
return &md5ValidationReader{
rawReader: reader,
payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
hash: h,
payloadLen: payloadLen,
}
}
func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
n, err = v.payload.Read(p)
if err != nil && err != io.EOF {
return n, err
}
v.read += int64(n)
if err == io.EOF {
if v.read != v.payloadLen {
return n, io.ErrUnexpectedEOF
}
expectSum := make([]byte, md5.Size)
actualSum := make([]byte, md5.Size)
if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
return n, sumReadErr
}
actualSum = v.hash.Sum(actualSum[0:0])
if !bytes.Equal(expectSum, actualSum) {
return n, awserr.New("InvalidChecksum",
fmt.Sprintf("expected MD5 checksum %s, got %s",
hex.EncodeToString(expectSum),
hex.EncodeToString(actualSum),
),
nil)
}
}
return n, err
}
func (v *md5ValidationReader) Close() error {
return v.rawReader.Close()
}
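
To make the append-md5 flow above concrete, here is a hypothetical in-package test sketch (not part of the vendored file): it builds a body whose final 16 bytes are the MD5 of the payload, wraps it in newMD5ValidationReader, and expects the read to finish cleanly when the trailing checksum matches.

package s3

import (
    "bytes"
    "crypto/md5"
    "io/ioutil"
    "testing"
)

func TestMD5ValidationReaderSketch(t *testing.T) {
    payload := []byte("hello world")
    sum := md5.Sum(payload)

    // Body on the wire: payload followed by its 16-byte MD5 checksum.
    body := ioutil.NopCloser(bytes.NewReader(append(payload, sum[:]...)))

    r := newMD5ValidationReader(body, int64(len(payload)))
    got, err := ioutil.ReadAll(r)
    if err != nil {
        t.Fatalf("expected trailing checksum to validate, got %v", err)
    }
    if !bytes.Equal(got, payload) {
        t.Fatalf("unexpected payload %q", got)
    }
}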


@@ -0,0 +1,107 @@
package s3
import (
"io/ioutil"
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
// NormalizeBucketLocation is a utility function which will update the
// passed in value to always be a region ID. Generally this would be used
// with GetBucketLocation API operation.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
func NormalizeBucketLocation(loc string) string {
switch loc {
case "":
loc = "us-east-1"
case "EU":
loc = "eu-west-1"
}
return loc
}
// NormalizeBucketLocationHandler is a request handler which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// })
// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
// err := req.Send()
var NormalizeBucketLocationHandler = request.NamedHandler{
Name: "awssdk.s3.NormalizeBucketLocation",
Fn: func(req *request.Request) {
if req.Error != nil {
return
}
out := req.Data.(*GetBucketLocationOutput)
loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
out.LocationConstraint = aws.String(loc)
},
}
// WithNormalizeBucketLocation is a request option which will update the
// GetBucketLocation's result LocationConstraint value to always be a region ID.
//
// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
//
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// result, err := svc.GetBucketLocationWithContext(ctx,
// &s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// },
// s3.WithNormalizeBucketLocation,
// )
func WithNormalizeBucketLocation(r *request.Request) {
r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
}
func buildGetBucketLocation(r *request.Request) {
if r.DataFilled() {
out := r.Data.(*GetBucketLocationOutput)
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization,
"failed reading response body", err)
return
}
match := reBucketLocation.FindSubmatch(b)
if len(match) > 1 {
loc := string(match[1])
out.LocationConstraint = aws.String(loc)
}
}
}
func populateLocationConstraint(r *request.Request) {
if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
in := r.Params.(*CreateBucketInput)
if in.CreateBucketConfiguration == nil {
r.Params = awsutil.CopyOf(r.Params)
in = r.Params.(*CreateBucketInput)
in.CreateBucketConfiguration = &CreateBucketConfiguration{
LocationConstraint: r.Config.Region,
}
}
}
}
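
A hedged usage sketch (not part of the vendored file; the bucket name and region are placeholders): because populateLocationConstraint runs during validation, a CreateBucket call made against a region other than us-east-1 does not need an explicit CreateBucketConfiguration.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-west-1")))
    svc := s3.New(sess)

    // LocationConstraint is filled in from the client's region automatically.
    _, err := svc.CreateBucket(&s3.CreateBucketInput{
        Bucket: aws.String("example-bucket"),
    })
    if err != nil {
        log.Fatalf("create bucket failed, %v", err)
    }
}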


@@ -0,0 +1,75 @@
package s3
import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
)
func init() {
initClient = defaultInitClientFn
initRequest = defaultInitRequestFn
}
func defaultInitClientFn(c *client.Client) {
// Support building custom endpoints based on config
c.Handlers.Build.PushFront(updateEndpointForS3Config)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
c.Handlers.Build.PushBack(computeSSEKeyMD5)
c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}
func defaultInitRequestFn(r *request.Request) {
// Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
platformRequestHandlers(r)
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
opPutBucketReplication:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
case opCreateBucket:
// Auto-populate LocationConstraint with current region
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.
// case opGetObject:
// r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
// r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
}
}
// bucketGetter is an accessor interface to grab the "Bucket" field from
// an S3 type.
type bucketGetter interface {
getBucket() string
}
// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
// field from an S3 type.
type sseCustomerKeyGetter interface {
getSSECustomerKey() string
}
// copySourceSSECustomerKeyGetter is an accessor interface to grab the
// "CopySourceSSECustomerKey" field from an S3 type.
type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}
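
For illustration (not part of the vendored file; names are placeholders), DeleteObjects is one of the operations wired up with the contentMD5 build handler above, so the required Content-MD5 header is computed by the SDK rather than the caller:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    // The Content-MD5 header required by DeleteObjects is set by the
    // contentMD5 build handler registered in defaultInitRequestFn.
    _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
        Bucket: aws.String("example-bucket"),
        Delete: &s3.Delete{
            Objects: []*s3.ObjectIdentifier{{Key: aws.String("example-key")}},
        },
    })
    if err != nil {
        log.Fatalf("delete objects failed, %v", err)
    }
}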

26 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go generated vendored Normal file

@@ -0,0 +1,26 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package s3 provides the client and types for making API
// requests to Amazon Simple Storage Service.
//
// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
//
// See s3 package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
//
// Using the Client
//
// To contact Amazon Simple Storage Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon Simple Storage Service client S3 for more
// information on creating a client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
package s3
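
A minimal, hedged example of the pattern the package documentation describes (not part of the vendored file; the region is a placeholder): create a session, construct the client with New, and make a request.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
    svc := s3.New(sess)

    out, err := svc.ListBuckets(&s3.ListBucketsInput{})
    if err != nil {
        log.Fatalf("list buckets failed, %v", err)
    }
    for _, b := range out.Buckets {
        fmt.Println(aws.StringValue(b.Name))
    }
}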


@@ -0,0 +1,123 @@
// Upload Managers
//
// The s3manager package's Uploader provides concurrent upload of content to S3
// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
// for streaming uploads, and will take advantage of io.ReadSeeker for
// optimizations if the Body satisfies that type. Once the Uploader instance
// is created you can call Upload concurrently from multiple goroutines safely.
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// f, err := os.Open(filename)
// if err != nil {
// return fmt.Errorf("failed to open file %q, %v", filename, err)
// }
//
// // Upload the file to S3.
// result, err := uploader.Upload(&s3manager.UploadInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// Body: f,
// })
// if err != nil {
// return fmt.Errorf("failed to upload file, %v", err)
// }
// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
//
// See the s3manager package's Uploader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
//
// Download Manager
//
// The s3manager package's Downloader provides concurrent downloading of Objects
// from S3. The Downloader will write S3 Object content with an io.WriterAt.
// Once the Downloader instance is created you can call Download concurrently from
// multiple goroutines safely.
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a file to write the S3 Object contents to.
// f, err := os.Create(filename)
// if err != nil {
// return fmt.Errorf("failed to create file %q, %v", filename, err)
// }
//
// // Write the contents of S3 Object to the file
// n, err := downloader.Download(f, &s3.GetObjectInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// })
// if err != nil {
// return fmt.Errorf("failed to download file, %v", err)
// }
// fmt.Printf("file downloaded, %d bytes\n", n)
//
// See the s3manager package's Downloader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
//
// Automatic URI cleaning
//
// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
// used by the service client.
//
// svc := s3.New(sess, &aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// })
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
//
// Get Bucket Region
//
// GetBucketRegion will attempt to get the region for a bucket using a region
// hint to determine which AWS partition to perform the query on. Use this utility
// to determine the region a bucket is in.
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// See the s3manager package's GetBucketRegion function documentation for more information
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
//
// S3 Crypto Client
//
// The s3crypto package provides the tools to upload and download encrypted
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
// sess := session.Must(session.NewSession())
//
// // Create the decryption client.
// svc := s3crypto.NewDecryptionClient(sess)
//
// // The object will be downloaded from S3 and decrypted locally. The metadata
// // about the object's encryption will instruct the decryption client how to
// // decrypt the content of the object. By default KMS is used for keys.
// result, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String(myBucket),
// Key: aws.String(myKey),
// })
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//
package s3

48 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go generated vendored Normal file

@@ -0,0 +1,48 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
const (
// ErrCodeBucketAlreadyExists for service response error code
// "BucketAlreadyExists".
//
// The requested bucket name is not available. The bucket namespace is shared
// by all users of the system. Please select a different name and try again.
ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
// ErrCodeBucketAlreadyOwnedByYou for service response error code
// "BucketAlreadyOwnedByYou".
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
// ErrCodeNoSuchBucket for service response error code
// "NoSuchBucket".
//
// The specified bucket does not exist.
ErrCodeNoSuchBucket = "NoSuchBucket"
// ErrCodeNoSuchKey for service response error code
// "NoSuchKey".
//
// The specified key does not exist.
ErrCodeNoSuchKey = "NoSuchKey"
// ErrCodeNoSuchUpload for service response error code
// "NoSuchUpload".
//
// The specified multipart upload does not exist.
ErrCodeNoSuchUpload = "NoSuchUpload"
// ErrCodeObjectAlreadyInActiveTierError for service response error code
// "ObjectAlreadyInActiveTierError".
//
// This operation is not allowed against this storage tier.
ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
// ErrCodeObjectNotInActiveTierError for service response error code
// "ObjectNotInActiveTierError".
//
// The source object of the COPY operation is not in the active tier and is
// only stored in Amazon Glacier.
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
)
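
A hedged sketch (not part of the vendored file; bucket and key are placeholders) showing how these error code constants are typically matched against an awserr.Error returned by an S3 operation:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    _, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("missing-key"),
    })
    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case s3.ErrCodeNoSuchBucket:
            fmt.Println("bucket does not exist")
        case s3.ErrCodeNoSuchKey:
            fmt.Println("key does not exist")
        default:
            log.Printf("request failed: %v", aerr)
        }
    }
}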


@@ -0,0 +1,155 @@
package s3
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// an operationBlacklist is a list of operation names that a request
// handler should not be executed with.
type operationBlacklist []string
// Continue will return true if the Request's operation name is not
// in the blacklist. False otherwise.
func (b operationBlacklist) Continue(r *request.Request) bool {
for i := 0; i < len(b); i++ {
if b[i] == r.Operation.Name {
return false
}
}
return true
}
var accelerateOpBlacklist = operationBlacklist{
opListBuckets, opCreateBucket, opDeleteBucket,
}
// Request handler to automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain "."
func updateEndpointForS3Config(r *request.Request) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
if accelerate && accelerateOpBlacklist.Continue(r) {
if forceHostStyle {
if r.Config.Logger != nil {
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
}
}
updateEndpointForAccelerate(r)
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
updateEndpointForHostStyle(r)
}
}
func updateEndpointForHostStyle(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided;
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
// bucket name must be valid to put into the host
return
}
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
var (
accelElem = []byte("s3-accelerate.dualstack.")
)
func updateEndpointForAccelerate(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided;
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
nil)
return
}
parts := strings.Split(r.HTTPRequest.URL.Host, ".")
if len(parts) < 3 {
r.Error = awserr.New("InvalidParameterExecption",
fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
r.HTTPRequest.URL.Host), nil)
return
}
if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
parts[0] = "s3-accelerate"
}
for i := 1; i+1 < len(parts); i++ {
if parts[i] == aws.StringValue(r.Config.Region) {
parts = append(parts[:i], parts[i+1:]...)
break
}
}
r.HTTPRequest.URL.Host = strings.Join(parts, ".")
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
if iface, ok := params.(bucketGetter); ok {
b := iface.getBucket()
return b, len(b) > 0
}
return "", false
}
// hostCompatibleBucketName returns true if the request should
// put the bucket in the host. This is false if S3ForcePathStyle is
// explicitly set or if the bucket is not DNS compatible.
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
// Bucket might be DNS compatible but dots in the hostname will fail
// certificate validation, so do not use host-style.
if u.Scheme == "https" && strings.Contains(bucket, ".") {
return false
}
// if the bucket is DNS compatible
return dnsCompatibleBucketName(bucket)
}
var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
// Buckets created outside of the classic region MUST be DNS compatible.
func dnsCompatibleBucketName(bucket string) bool {
return reDomain.MatchString(bucket) &&
!reIPAddress.MatchString(bucket) &&
!strings.Contains(bucket, "..")
}
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
u.Host = bucket + "." + u.Host
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
}
}
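
As a hedged illustration of the addressing logic above (not part of the vendored file; hostnames are placeholders): a DNS-compatible bucket such as "mybucket" is moved into the host, while S3ForcePathStyle, or a bucket with dots used over HTTPS, keeps the bucket in the URL path.

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())

    // Default client: host-style addressing when the bucket name is DNS
    // compatible, e.g. https://mybucket.s3.us-west-2.amazonaws.com/key
    _ = s3.New(sess)

    // Path-style addressing: the bucket stays in the path, e.g.
    // https://s3.us-west-2.amazonaws.com/my.bucket.with.dots/key
    _ = s3.New(sess, aws.NewConfig().WithS3ForcePathStyle(true))
}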


@@ -0,0 +1,8 @@
// +build !go1.6
package s3
import "github.com/aws/aws-sdk-go/aws/request"
func platformRequestHandlers(r *request.Request) {
}


@@ -0,0 +1,28 @@
// +build go1.6
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
func platformRequestHandlers(r *request.Request) {
if r.Operation.HTTPMethod == "PUT" {
// 100-Continue should only be used on put requests.
r.Handlers.Sign.PushBack(add100Continue)
}
}
func add100Continue(r *request.Request) {
if aws.BoolValue(r.Config.S3Disable100Continue) {
return
}
if r.HTTPRequest.ContentLength < 1024*1024*2 {
// Ignore requests smaller than 2MB. This helps prevent delaying
// requests unnecessarily.
return
}
r.HTTPRequest.Header.Set("Expect", "100-Continue")
}
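
A brief hedged example (not part of the vendored file): the Expect: 100-Continue behavior added above for PUT requests of 2 MB or more can be opted out of through the client configuration.

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())

    // Disable the 100-Continue handshake for large PUT requests.
    svc := s3.New(sess, aws.NewConfig().WithS3Disable100Continue(true))
    _ = svc
}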

99 vendor/github.com/aws/aws-sdk-go/service/s3/service.go generated vendored Normal file

@@ -0,0 +1,99 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
// S3 provides the API operation methods for making requests to
// Amazon Simple Storage Service. See this package's package overview docs
// for details on the service.
//
// S3 methods are safe to use concurrently. It is not safe to
// mutate any of the struct's properties though.
type S3 struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "s3" // Name of service.
EndpointsID = ServiceName // ID to lookup a service endpoint with.
ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the S3 client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
// // Create a S3 client with additional configuration
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2006-03-01",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
s.DisableURIPathEscaping = true
}))
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a S3 operation and runs any
// custom request initialization.
func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}

84 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go generated vendored Normal file

@@ -0,0 +1,84 @@
package s3
import (
"crypto/md5"
"encoding/base64"
"net/http"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
func validateSSERequiresSSL(r *request.Request) {
if r.HTTPRequest.URL.Scheme == "https" {
return
}
if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
if len(iface.getSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
return
}
}
if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
if len(iface.getCopySourceSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
return
}
}
}
const (
sseKeyHeader = "x-amz-server-side-encryption-customer-key"
sseKeyMD5Header = sseKeyHeader + "-md5"
)
func computeSSEKeyMD5(r *request.Request) {
var key string
if g, ok := r.Params.(sseCustomerKeyGetter); ok {
key = g.getSSECustomerKey()
}
computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
}
const (
copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key"
copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
)
func computeCopySourceSSEKeyMD5(r *request.Request) {
var key string
if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
key = g.getCopySourceSSECustomerKey()
}
computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
}
func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
if len(key) == 0 {
// Backwards compatibility where the user just set the header value instead
// of using the API parameter, or set the header value for an
// operation without the parameter modeled.
key = r.Header.Get(keyHeader)
if len(key) == 0 {
return
}
// In the backwards compatible case, the header's value is not base64
// encoded, and needs to be encoded and updated by the SDK's customizations.
b64Key := base64.StdEncoding.EncodeToString([]byte(key))
r.Header.Set(keyHeader, b64Key)
}
// Only update Key's MD5 if not already set.
if len(r.Header.Get(keyMD5Header)) == 0 {
sum := md5.Sum([]byte(key))
keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
r.Header.Set(keyMD5Header, keyMD5)
}
}
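
For illustration (not part of the vendored file; the key below is a throwaway example value), the SSE-C key headers carry the base64 of the raw key and the base64 of its MD5, which is what computeKeyMD5 derives when only the key is supplied:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

func main() {
    // Example 32-byte SSE-C key (not a real secret).
    key := []byte("01234567890123456789012345678901")

    sum := md5.Sum(key)
    fmt.Println("x-amz-server-side-encryption-customer-key:    ",
        base64.StdEncoding.EncodeToString(key))
    fmt.Println("x-amz-server-side-encryption-customer-key-md5:",
        base64.StdEncoding.EncodeToString(sum[:]))
}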


@@ -0,0 +1,40 @@
package s3
import (
"bytes"
"io/ioutil"
"net/http"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
body := bytes.NewReader(b)
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, sdkio.SeekStart)
if body.Len() == 0 {
// If there is no body don't attempt to parse the body.
return
}
unmarshalError(r)
if err, ok := r.Error.(awserr.Error); ok && err != nil {
if err.Code() == request.ErrCodeSerialization {
r.Error = nil
return
}
r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
}
}


@@ -0,0 +1,88 @@
package s3
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)
type xmlErrorResponse struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"`
Message string `xml:"Message"`
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
msg := fmt.Sprintf(
"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
aws.StringValue(r.Config.Region),
aws.StringValue(r.Config.Endpoint),
)
if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
msg += fmt.Sprintf(", bucket is in '%s' region", v)
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError", msg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Attempt to parse error from body if it is known
var errResp xmlErrorResponse
err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
if err == io.EOF {
// Only capture the error if an unmarshal error occurs that is not EOF,
// because S3 might send an error without an error message, which causes
// the XML unmarshal to fail with EOF.
err = nil
}
if err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to unmarshal error message", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Fallback to status code converted to message if still no error code
if len(errResp.Code) == 0 {
statusText := http.StatusText(r.HTTPResponse.StatusCode)
errResp.Code = strings.Replace(statusText, " ", "", -1)
errResp.Message = statusText
}
r.Error = awserr.NewRequestFailure(
awserr.New(errResp.Code, errResp.Message, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// A RequestFailure provides access to the S3 Request ID and Host ID values
// returned from API operation errors. Getting the error as a string will
// return the formatted error with the same information as awserr.RequestFailure,
// while also adding the HostID value from the response.
type RequestFailure interface {
awserr.RequestFailure
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
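
A hedged sketch (not part of the vendored file; bucket and key are placeholders) of pulling the request ID and host ID out of a failed call via the RequestFailure interface above:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    _, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("example-key"),
    })
    if reqErr, ok := err.(s3.RequestFailure); ok {
        fmt.Println("status code:", reqErr.StatusCode())
        fmt.Println("request id: ", reqErr.RequestID())
        fmt.Println("host id:    ", reqErr.HostID())
    }
}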

214 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go generated vendored Normal file

@@ -0,0 +1,214 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilBucketExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 301,
},
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 403,
},
{
State: request.RetryWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadBucketInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadBucketRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilBucketNotExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadBucketInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadBucketRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilObjectExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
State: request.RetryWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadObjectInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadObjectRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
// WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilObjectNotExists",
MaxAttempts: 20,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *HeadObjectInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.HeadObjectRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
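
A hedged usage example (not part of the vendored file; bucket and key are placeholders): wait for an object to become visible after an upload using the generated waiter, which polls HeadObject up to 20 times, 5 seconds apart.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    err := svc.WaitUntilObjectExists(&s3.HeadObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("example-key"),
    })
    if err != nil {
        log.Fatalf("object never became visible, %v", err)
    }
    log.Println("object exists")
}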