Python thread pool faster than goroutines when scanning AWS S3?

Posted 2024-09-21 03:17:48


I've recently been taking a deep dive into Golang concurrency, specifically the use of channels and worker pools. I wanted to compare performance between Go and Python (as many have done), since most of what I've read says Go outperforms Python at concurrency. So I wrote two programs that scan an AWS account's S3 buckets and report the total size. I ran this against an account with 75+ buckets totaling several TB of data.

I was surprised to find that my Python implementation ran nearly 2x faster than my Go implementation. Based on all the benchmarks and literature I've read, this confuses me, and it leads me to believe I haven't implemented my Go code correctly. Watching both programs run, I noticed the Go implementation used only about 15% of my CPU, while Python used >85%. Am I missing an important step in Go, or did I overlook something in my implementation? Thanks in advance!

Python code:

'''
Get the size of all objects in all buckets in S3
'''
import os
import sys
import boto3
import concurrent.futures

def get_s3_bucket_sizes(aws_access_key_id, aws_secret_access_key, aws_session_token=None):

    s3client = boto3.client('s3')

    # Create the dictionary which will be indexed by the bucket's
    # name and has an S3Bucket object as its contents
    buckets = {}

    total_size = 0.0

    #
    # Start gathering data...
    #

    # Get all of the buckets in the account
    _buckets = s3client.list_buckets()

    cnt = 1
    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        future_bucket_to_scan = {executor.submit(get_bucket_objects, s3client, bucket): bucket for bucket in _buckets["Buckets"]}

        for future in concurrent.futures.as_completed(future_bucket_to_scan):
            bucket_object = future_bucket_to_scan[future]

            try:
                ret = future.result()
            except Exception as exc:
                print('ERROR: %s' % (str(exc)))
            else:
                total_size += ret

    print(total_size)

def get_bucket_objects(s3client, bucket):

    name = bucket["Name"]

    # Get all of the objects in the bucket
    lsbuckets = s3client.list_objects(Bucket=name)

    size = 0
    while True:
        if "Contents" not in lsbuckets.keys():
            break

        for content in lsbuckets["Contents"]:            
            size += content["Size"]

        break

    return size

#
# Main
#
if __name__=='__main__':
    get_s3_bucket_sizes(os.environ.get("AWS_ACCESS_KEY_ID"), os.environ.get("AWS_SECRET_ACCESS_KEY"))

Go code:

package main

import (
    "fmt"
    "sync"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

type S3_Bucket_Response struct {
    bucket string
    count  int64
    size   int64
    err    error
}

type S3_Bucket_Request struct {
    bucket string
    region string
}

func get_bucket_objects_async(wg *sync.WaitGroup, requests chan S3_Bucket_Request, responses chan S3_Bucket_Response) {

    var size  int64
    var count int64

    for request := range requests {
        bucket := request.bucket
        region := request.region

        // Create a new response
        response := new(S3_Bucket_Response)
        response.bucket = bucket

        sess, err := session.NewSession(&aws.Config{
            Region: aws.String(region), 
        })

        s3conn := s3.New(sess)

        resp, err := s3conn.ListObjectsV2(&s3.ListObjectsV2Input{
            Bucket: aws.String(bucket),
        })

        if err != nil {
            if awsErr, ok := err.(awserr.Error); ok {

                switch awsErr.Code() {
                case "NoSuchBucket":
                    response.err = fmt.Errorf("Bucket: (%s) is NoSuchBucket.  Must be in process of deleting.", bucket)
                case "AccessDenied":
                    response.err = fmt.Errorf("Bucket: (%s) is AccessDenied.  You should really be running this with full Admin Privaleges", bucket)
                }
            } else {
                response.err = fmt.Errorf("Listing Objects Unhandled Error: %s ", err)
            }

            responses <- *response
            continue
        } 

        contents := resp.Contents
        size      = 0
        count     = 0

        for i:=0; i<len(contents); i++ {
            size  += *contents[i].Size
            count += 1
        }

        response.size  = size
        response.count = count

        responses <- *response
    }

    wg.Done()
}

func main() {

    var err  error
    var size int64
    var resp *s3.ListBucketsOutput
    var wg sync.WaitGroup

    sess, _ := session.NewSession()
    s3conn  := s3.New(sess)

    // Get account bucket listing
    if resp, err = s3conn.ListBuckets(&s3.ListBucketsInput{});err != nil {
        fmt.Println("Error listing buckets: %s", err)
        return 
    }

    buckets := resp.Buckets
    size = 0

    // Create the buffered channels
    requests  := make(chan S3_Bucket_Request , len(buckets))
    responses := make(chan S3_Bucket_Response, len(buckets))

    for i := range buckets {

        bucket := *buckets[i].Name

        resp2, err := s3conn.GetBucketLocation(&s3.GetBucketLocationInput{                                                           
            Bucket: aws.String(bucket),                                                                                                       
        })         

        if err != nil {
            fmt.Printf("Could not get bucket location for bucket (%s): %s", bucket, err)
            continue
        }

        wg.Add(1)
        go get_bucket_objects_async(&wg, requests, responses)

        region := "us-east-1"
        if resp2.LocationConstraint != nil {
            region = *resp2.LocationConstraint
        }

        request := new(S3_Bucket_Request)
        request.bucket = bucket
        request.region = region

        requests <- *request        
    }

    // Close requests channel and wait for responses
    close(requests)
    wg.Wait()
    close(responses)

    cnt := 1
    // Process the results as they come in
    for response := range responses {

        fmt.Printf("Bucket: (%s) complete!  Buckets remaining: %d\n", response.bucket, len(buckets)-cnt)

        // Did the bucket request have errors?
        if response.err != nil {
            fmt.Println(response.err)
            continue
        }

        cnt  += 1
        size += response.size
    }

    fmt.Println(size)
    return 
}



1 Answer

Posted by a user · 2024-09-21 03:17:48

Sorry, I haven't had a chance to review this thoroughly, but my answer would be: these solutions don't appear to be equivalent in their concurrency. Three things jump out:

  • Thread safety of the boto S3 client. Is it thread-safe? Can you confirm that? This reddit article suggests it is not thread-safe.
  • Python uses a worker pool size of 50, but Go's is unbounded (a semaphore could be used to cap the current code at 50; see the sketch after this list).
  • I'm not very familiar with boto, but compared to Python, Go appears to perform an extra IO call per bucket (GetBucketLocation) on the main thread.
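A minimal sketch of that cap, using a buffered channel as a counting semaphore (scanBucket is a hypothetical stand-in for the per-bucket ListObjectsV2 work done by get_bucket_objects_async in the question):

package main

import (
    "fmt"
    "sync"
    "time"
)

// scanBucket is a hypothetical stand-in for the real per-bucket
// ListObjectsV2 call.
func scanBucket(name string) {
    time.Sleep(10 * time.Millisecond) // pretend this is an S3 API call
    fmt.Println("scanned", name)
}

func main() {
    buckets := []string{"bucket-a", "bucket-b", "bucket-c"}

    sem := make(chan struct{}, 50) // buffered channel used as a semaphore
    var wg sync.WaitGroup

    for _, b := range buckets {
        wg.Add(1)
        go func(b string) {
            defer wg.Done()
            sem <- struct{}{}        // acquire one of the 50 slots
            defer func() { <-sem }() // release the slot on return
            scanBucket(b)
        }(b)
    }
    wg.Wait()
}

The buffered channel blocks the 51st send until a worker releases a slot, which matches the back-pressure you get from Python's ThreadPoolExecutor(max_workers=50).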

My next questions would be:

  • Is each solution correct? Can you prove it? (Do both report the same byte total, and does that total match the S3 console?)
  • Are you sure the concurrency structures are equivalent, i.e. summing on the main thread, the same pool size, and the same amount of IO work per worker?
  • Are the client defaults equivalent? E.g. does Python have a default connection pool size? Go doesn't, so it will create connections per request (I ran into this just last week; see the sketch below).
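On that last point, a minimal sketch of a workaround, assuming aws-sdk-go v1: net/http's default Transport keeps only two idle connections per host, so 50 concurrent workers keep dialing fresh TLS connections unless the idle pool is enlarged and a single session/client is shared (the pool sizes here are illustrative, not tuned):

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Enlarge the idle-connection pool so concurrent workers reuse
    // TLS connections instead of opening a new one per request.
    transport := &http.Transport{
        MaxIdleConns:        100,
        MaxIdleConnsPerHost: 100, // net/http's default is only 2
        IdleConnTimeout:     90 * time.Second,
    }

    sess := session.Must(session.NewSession(&aws.Config{
        Region:     aws.String("us-east-1"),
        HTTPClient: &http.Client{Transport: transport},
    }))

    // Build one client and share it across all goroutines; the SDK's
    // service clients are safe for concurrent use.
    s3conn := s3.New(sess)

    out, err := s3conn.ListBuckets(&s3.ListBucketsInput{})
    if err != nil {
        fmt.Println("Error listing buckets:", err)
        return
    }
    fmt.Println("buckets:", len(out.Buckets))
}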
