@@ -225,6 +225,7 @@ def upload(item, filename, metadata, *, iaConfigFile = None, partSize = 100*1024 |
headers = {'Authorization': f'LOW {access}:{secret}'} |
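# 'LOW <access>:<secret>' is the authorization scheme of archive.org's S3-like API; access and secret are presumably loaded from iaConfigFile earlier in this function.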
metadataHeaders = metadata_to_headers(metadata) |
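# metadata_to_headers presumably flattens the metadata dict into the x-archive-meta-* headers that the IA S3 API uses to set item metadata on upload.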
initialHeaders = {**headers, 'x-amz-auto-make-bucket': '1', **metadataHeaders} |
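# 'x-amz-auto-make-bucket': '1' asks IA to create the item (the "bucket") if it does not exist yet.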
extraHeaders = {'x-archive-queue-derive': '1' if queueDerive else '0', 'x-archive-keep-old-version': '1' if keepOldVersion else '0'} |
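# x-archive-queue-derive controls whether IA queues a derive task after the upload finishes;
# x-archive-keep-old-version controls whether a replaced file is kept as an old version. Both mirror the queueDerive/keepOldVersion arguments.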
|
# Always read the first part |
data, size, contentMd5 = get_part(f, partSize, progress) |
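# get_part presumably reads up to partSize bytes from f and returns the chunk, its length, and its MD5 digest (likely sent as Content-MD5 so IA can verify the data).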
@@ -232,7 +233,7 @@ def upload(item, filename, metadata, *, iaConfigFile = None, partSize = 100*1024 |
# If the file is only a single part anyway, use the normal PUT API instead of multipart because IA can process that *much* faster. |
if uploadId is None and parts is None and complete and size < partSize: |
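# i.e. not resuming an existing multipart upload (no uploadId or parts passed in), completion is wanted, and the first read came up short of partSize, so the whole file fits in a single part.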
logger.info(f'Uploading in one piece ({size} bytes)') |
partNumber, eTag = upload_one(url, None, 0, data, contentMd5, size, initialHeaders, progress, tries, partTimeout) |
partNumber, eTag = upload_one(url, None, 0, data, contentMd5, size, {**initialHeaders, **extraHeaders}, progress, tries, partTimeout) |
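# Same call as the line above, but with extraHeaders merged in: the queue-derive and keep-old-version flags now also apply on this single-part fast path, which is the actual change in this hunk (the line above is the old version being replaced).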
logger.info(f'Upload OK, ETag: {eTag}') |
logger.info('Done!') |
return |
@@ -297,7 +298,6 @@ def upload(item, filename, metadata, *, iaConfigFile = None, partSize = 100*1024 |
# Build the CompleteMultipartUpload body by hand as a plain XML string; the structure is fixed, so no XML library is used.
completeData = '<CompleteMultipartUpload>' + ''.join(f'<Part><PartNumber>{partNumber}</PartNumber><ETag>{etag}</ETag></Part>' for partNumber, etag in parts) + '</CompleteMultipartUpload>' |
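# For example, parts = [(1, 'etag1'), (2, 'etag2')] yields:
# <CompleteMultipartUpload><Part><PartNumber>1</PartNumber><ETag>etag1</ETag></Part><Part><PartNumber>2</PartNumber><ETag>etag2</ETag></Part></CompleteMultipartUpload>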
completeData = completeData.encode('utf-8') |
extraHeaders = {'x-archive-queue-derive': '1' if queueDerive else '0', 'x-archive-keep-old-version': '1' if keepOldVersion else '0'} |
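# Per the hunk header above (7 lines become 6), this duplicate definition is the line being dropped here, now that extraHeaders is defined near the top of upload().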
for attempt in range(1, tries + 1): |
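# Retry the completion request up to `tries` times.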
if attempt > 1: |
logger.info('Retrying completion request') |