Concurrent.futures 在 AWS lambda 函数实现中
Concurrent.futures in AWS lambda function implementation
我正在使用 AWS Lambda python 函数将 EBS/RDS 快照复制到另一个区域以进行灾难恢复。
我遇到的问题是当时复制限制为 5 个快照。
如果我当时尝试复制超过 5 个,我将收到错误消息:
botocore.exceptions.ClientError: An error occurred (ResourceLimitExceeded) when calling the CopySnapshot operation: Too many snapshot copies in progress. The limit is 5 for this destination region.
为了避免这种情况,我添加了一个 boto3 waiter(等待器)函数,它会检查目标区域中快照的状态,并在快照进入 completed 状态后才继续循环。
它运行良好,但在这种情况下,它一次只能处理一个快照。
问题是,如何实现concurrent.futures 并行任务模块,一次复制5个快照?
# Copy each source EBS snapshot to the destination region, one at a time.
# NOTE: the boto3 waiter blocks until each copy completes, so only one
# snapshot copy is ever in flight — this is the sequential version the
# question is asking to parallelize.
waiter = client_ec2_dst.get_waiter('snapshot_completed')
message = ""
for i in ec2_snapshots_src:
    # Build the tag set for the copied snapshot: drop the AWS Backup
    # bookkeeping tag and record the retention date plus the source id.
    # BUGFIX: this assignment was commented out while still being used
    # below (snapshot_src_name / create_tags), which raised NameError.
    snapshot_tags_filtered = [item for item in i["Tags"] if item['Key'] != 'aws:backup:source-resource']
    snapshot_tags_filtered.append({'Key': 'delete_On', 'Value': delete_on})
    snapshot_tags_filtered.append({'Key': 'src_Id', 'Value': i["SnapshotId"]})
    try:
        response = client_ec2_dst.copy_snapshot(
            Description='[Disaster Recovery] copied from us-east-1',
            SourceRegion=region_src,
            SourceSnapshotId=i["SnapshotId"],
            DryRun=False,
            # Encrypted=True,
            # KmsKeyId='1e287363-89f6-4837-a619-b550ff28c211',
        )
        new_snapshot_id = response["SnapshotId"]
        # Block until the copy reaches 'completed': poll every 5 s,
        # up to 120 attempts (10 minutes total).
        waiter.wait(
            SnapshotIds=[new_snapshot_id],
            WaiterConfig={'Delay': 5, 'MaxAttempts': 120}
        )
        snapshot_src_name = [dic['Value'] for dic in snapshot_tags_filtered if dic['Key'] == 'Name']
        message += ("Started copying latest EBS snapshot: " + i["SnapshotId"] + " for EC2 instance: " + str(snapshot_src_name) + " from: " + region_src + " to: " + region_dst + " with new id: " + new_snapshot_id + ".\n")
        # Adding tags to snapshots in destination region
        tag_src = [new_snapshot_id]
        tag = client_ec2_dst.create_tags(
            DryRun=False,
            Resources=tag_src,
            Tags=snapshot_tags_filtered
        )
    except Exception:
        # Bare raise preserves the original traceback ('raise e' resets it).
        raise
您可以使用并发执行器和 max_workers
参数来限制同时运行的作业数量。像这样:
import concurrent.futures


def copy_snapshot(snapshot_id):
    """Copy one snapshot to the destination region and block until done."""
    completion_waiter = client_ec2_dst.get_waiter('snapshot_completed')
    copy_response = client_ec2_dst.copy_snapshot(
        Description='[Disaster Recovery] copied from us-east-1',
        SourceRegion=region_src,
        SourceSnapshotId=snapshot_id,
        DryRun=False
    )
    copied_id = copy_response["SnapshotId"]
    # Poll every 5 s, up to 120 attempts (10 minutes total).
    completion_waiter.wait(
        SnapshotIds=[copied_id],
        WaiterConfig={'Delay': 5, 'MaxAttempts': 120}
    )


# Copy snapshots in parallel, but no more than 5 at a time: each worker
# thread holds its slot until the copy completes, so at most max_workers
# copies are ever in progress in the destination region.
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    pending_copies = [
        executor.submit(copy_snapshot, snap['SnapshotId'])
        for snap in ec2_snapshots_src
    ]
    # Surface any exception from the workers, in submission order.
    for pending_copy in pending_copies:
        pending_copy.result()
我正在使用 AWS Lambda python 函数将 EBS/RDS 快照复制到另一个区域以进行灾难恢复。 我遇到的问题是当时复制限制为 5 个快照。 如果我当时尝试复制超过 5 个,我将收到错误消息:
botocore.exceptions.ClientError: An error occurred (ResourceLimitExceeded) when calling the CopySnapshot operation: Too many snapshot copies in progress. The limit is 5 for this destination region.
为了避免这种情况,我添加了一个 boto3 waiter(等待器)函数,它会检查目标区域中快照的状态,并在快照进入 completed 状态后才继续循环。 它运行良好,但在这种情况下,它一次只能处理一个快照。 问题是,如何实现 concurrent.futures 并行任务模块,一次复制 5 个快照?
# Copy each source EBS snapshot to the destination region, one at a time.
# NOTE: the boto3 waiter blocks until each copy completes, so only one
# snapshot copy is ever in flight — this is the sequential version the
# question is asking to parallelize.
waiter = client_ec2_dst.get_waiter('snapshot_completed')
message = ""
for i in ec2_snapshots_src:
    # Build the tag set for the copied snapshot: drop the AWS Backup
    # bookkeeping tag and record the retention date plus the source id.
    # BUGFIX: this assignment was commented out while still being used
    # below (snapshot_src_name / create_tags), which raised NameError.
    snapshot_tags_filtered = [item for item in i["Tags"] if item['Key'] != 'aws:backup:source-resource']
    snapshot_tags_filtered.append({'Key': 'delete_On', 'Value': delete_on})
    snapshot_tags_filtered.append({'Key': 'src_Id', 'Value': i["SnapshotId"]})
    try:
        response = client_ec2_dst.copy_snapshot(
            Description='[Disaster Recovery] copied from us-east-1',
            SourceRegion=region_src,
            SourceSnapshotId=i["SnapshotId"],
            DryRun=False,
            # Encrypted=True,
            # KmsKeyId='1e287363-89f6-4837-a619-b550ff28c211',
        )
        new_snapshot_id = response["SnapshotId"]
        # Block until the copy reaches 'completed': poll every 5 s,
        # up to 120 attempts (10 minutes total).
        waiter.wait(
            SnapshotIds=[new_snapshot_id],
            WaiterConfig={'Delay': 5, 'MaxAttempts': 120}
        )
        snapshot_src_name = [dic['Value'] for dic in snapshot_tags_filtered if dic['Key'] == 'Name']
        message += ("Started copying latest EBS snapshot: " + i["SnapshotId"] + " for EC2 instance: " + str(snapshot_src_name) + " from: " + region_src + " to: " + region_dst + " with new id: " + new_snapshot_id + ".\n")
        # Adding tags to snapshots in destination region
        tag_src = [new_snapshot_id]
        tag = client_ec2_dst.create_tags(
            DryRun=False,
            Resources=tag_src,
            Tags=snapshot_tags_filtered
        )
    except Exception:
        # Bare raise preserves the original traceback ('raise e' resets it).
        raise
您可以使用并发执行器和 max_workers
参数来限制同时运行的作业数量。像这样:
import concurrent.futures


def copy_snapshot(snapshot_id):
    """Copy one snapshot to the destination region and block until done."""
    completion_waiter = client_ec2_dst.get_waiter('snapshot_completed')
    copy_response = client_ec2_dst.copy_snapshot(
        Description='[Disaster Recovery] copied from us-east-1',
        SourceRegion=region_src,
        SourceSnapshotId=snapshot_id,
        DryRun=False
    )
    copied_id = copy_response["SnapshotId"]
    # Poll every 5 s, up to 120 attempts (10 minutes total).
    completion_waiter.wait(
        SnapshotIds=[copied_id],
        WaiterConfig={'Delay': 5, 'MaxAttempts': 120}
    )


# Copy snapshots in parallel, but no more than 5 at a time: each worker
# thread holds its slot until the copy completes, so at most max_workers
# copies are ever in progress in the destination region.
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    pending_copies = [
        executor.submit(copy_snapshot, snap['SnapshotId'])
        for snap in ec2_snapshots_src
    ]
    # Surface any exception from the workers, in submission order.
    for pending_copy in pending_copies:
        pending_copy.result()