Refine search
@Override protected boolean copyObject(String src, String dst) { LOG.debug("Copying {} to {}", src, dst); // Retry copy for a few times, in case some AWS internal errors happened during copy. int retries = 3; for (int i = 0; i < retries; i++) { try { CopyObjectRequest request = new CopyObjectRequest(mBucketName, src, mBucketName, dst); if (Boolean.parseBoolean( mConf.get(PropertyKey.UNDERFS_S3A_SERVER_SIDE_ENCRYPTION_ENABLED))) { ObjectMetadata meta = new ObjectMetadata(); meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); request.setNewObjectMetadata(meta); } mManager.copy(request).waitForCopyResult(); return true; } catch (AmazonClientException | InterruptedException e) { LOG.error("Failed to copy file {} to {}", src, dst, e); if (i != retries - 1) { LOG.error("Retrying copying file {} to {}", src, dst); } } } LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries); return false; }
/**
 * Fluent variant of {@code setCannedAccessControlList}: records the canned ACL to
 * apply to the newly copied object, then returns this request so further
 * configuration calls can be chained.
 *
 * @param cannedACL the canned ACL to set for the newly copied object
 * @return this {@code CopyObjectRequest}, enabling additional method calls to be
 *         chained together
 */
public CopyObjectRequest withCannedAccessControlList(CannedAccessControlList cannedACL) {
    setCannedAccessControlList(cannedACL);
    return this;
}
origReq.getDestinationBucketName(), origReq.getDestinationKey()).withCannedACL( origReq.getCannedAccessControlList()) .withRequesterPays(origReq.isRequesterPays()) .withAccessControlList(origReq.getAccessControlList()) .withStorageClass(origReq.getStorageClass()) .withSSECustomerKey(origReq.getDestinationSSECustomerKey()) .withSSEAwsKeyManagementParams(origReq.getSSEAwsKeyManagementParams()) .withGeneralProgressListener(origReq.getGeneralProgressListener()) .withRequestMetricCollector(origReq.getRequestMetricCollector()) ObjectMetadata newObjectMetadata = origReq.getNewObjectMetadata(); if (newObjectMetadata == null){ newObjectMetadata = new ObjectMetadata(); if (newObjectMetadata.getContentType() == null){ newObjectMetadata.setContentType(metadata.getContentType()); req.setTagging(origReq.getNewObjectTagging()); req.withObjectLockMode(origReq.getObjectLockMode()) .withObjectLockLegalHoldStatus(origReq.getObjectLockLegalHoldStatus()) .withObjectLockRetainUntilDate(origReq.getObjectLockRetainUntilDate()); String uploadId = s3.initiateMultipartUpload(req).getUploadId(); log.debug("Initiated new multipart upload: " + uploadId);
/**
 * Forces SSE-S3 (AES-256) server-side encryption on the copy destination. Any
 * metadata already attached to the request is cloned before being modified, so the
 * caller's {@code ObjectMetadata} instance is left untouched.
 */
@Override
public CopyObjectRequest decorate(CopyObjectRequest request) {
    ObjectMetadata existing = request.getNewObjectMetadata();
    final ObjectMetadata metadata = (existing == null) ? new ObjectMetadata() : existing.clone();
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withNewObjectMetadata(metadata);
}
}
/**
 * Sets an object's website redirect location by copying the object onto itself with
 * the new redirect location applied (S3 object metadata is immutable in place).
 *
 * @throws SdkClientException if the request could not be handed off to S3
 * @throws AmazonServiceException if S3 rejects the request
 */
@Override
public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation)
        throws SdkClientException, AmazonServiceException {
    // Fix: the validation messages were copy-pasted from changeObjectStorageClass and
    // wrongly talked about "storage class" / "newStorageClass"; they now describe the
    // actual parameters of this operation.
    rejectNull(bucketName,
            "The bucketName parameter must be specified when setting an object's redirect location");
    rejectNull(key,
            "The key parameter must be specified when setting an object's redirect location");
    rejectNull(newRedirectLocation,
            "The newRedirectLocation parameter must be specified when setting an object's redirect location");
    copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
            .withRedirectLocation(newRedirectLocation));
}
/**
 * Changes an existing object's storage class by issuing an in-place copy (source and
 * destination are the same bucket and key) carrying the new storage class.
 */
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass)
        throws SdkClientException, AmazonServiceException {
    rejectNull(bucketName,
            "The bucketName parameter must be specified when changing an object's storage class");
    rejectNull(key,
            "The key parameter must be specified when changing an object's storage class");
    rejectNull(newStorageClass,
            "The newStorageClass parameter must be specified when changing an object's storage class");
    CopyObjectRequest inPlaceCopy = new CopyObjectRequest(bucketName, key, bucketName, key)
            .withStorageClass(newStorageClass.toString());
    copyObject(inPlaceCopy);
}
copyObjectRequest.getDestinationBucketName(), copyObjectRequest.getDestinationKey()).withCannedACL( copyObjectRequest.getCannedAccessControlList()); if (copyObjectRequest.getAccessControlList() != null) { initiateMultipartUploadRequest .setAccessControlList(copyObjectRequest .getAccessControlList()); if (copyObjectRequest.getStorageClass() != null) { initiateMultipartUploadRequest.setStorageClass(StorageClass .fromValue(copyObjectRequest.getStorageClass())); if (copyObjectRequest.getDestinationSSECustomerKey() != null) { initiateMultipartUploadRequest.setSSECustomerKey( copyObjectRequest.getDestinationSSECustomerKey()); ObjectMetadata newObjectMetadata = copyObjectRequest.getNewObjectMetadata(); if (newObjectMetadata == null) { newObjectMetadata = new ObjectMetadata(); if (newObjectMetadata.getContentType() == null) { newObjectMetadata.setContentType(metadata.getContentType()); String uploadId = s3.initiateMultipartUpload( initiateMultipartUploadRequest).getUploadId(); log.debug("Initiated new multipart upload: " + uploadId);
@Test public void testMarkAsUnused() throws BinaryStoreException { ObjectMetadata objMeta = new ObjectMetadata(); Map<String, String> userMeta = new HashMap<>(); // Existing value of unused property set to false (so file is considered used) userMeta.put(s3BinaryStore.UNUSED_KEY, String.valueOf(false)); objMeta.setUserMetadata(userMeta); expect(s3Client.getObjectMetadata(eq(BUCKET), isA(String.class))) .andReturn(objMeta); Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance(); expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null); replayAll(); s3BinaryStore.markAsUnused(Collections.singleton(new BinaryKey(TEST_KEY))); ObjectMetadata newObjMeta = copyRequestCapture.getValue().getNewObjectMetadata(); assertEquals(String.valueOf(true), newObjMeta.getUserMetadata().get(s3BinaryStore.UNUSED_KEY)); }
/**
 * Set encryption in {@link CopyObjectRequest}.
 *
 * <p>For {@code SSE_S3}, requests AES-256 server-side encryption on the copy
 * destination. The request's metadata is cloned before modification so that a
 * metadata instance the caller may still hold elsewhere is never mutated in place
 * (matching the behavior of the other encryption decorator in this codebase).
 */
public CopyObjectRequest decorate(CopyObjectRequest request) {
    switch (getDataEncryption()) {
        case SSE_S3:
            // Fix: previously mutated the caller-supplied ObjectMetadata directly.
            ObjectMetadata metadata = request.getNewObjectMetadata() == null
                    ? new ObjectMetadata()
                    : request.getNewObjectMetadata().clone();
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(metadata);
            break;
        case NONE:
            break;
    }
    return request;
}
AmazonS3 s3 = new AmazonS3Client(); String bucketName = "bucketName "; String key = "key.txt"; ObjectMetadata newObjectMetadata = new ObjectMetadata(); // ... whatever you desire, e.g.: newObjectMetadata.setHeader("Expires", "Thu, 21 Mar 2042 08:16:32 GMT"); CopyObjectRequest copyObjectRequest = new CopyObjectRequest() .WithSourceBucketName(bucketName) .WithSourceKey(key) .WithDestinationBucket(bucketName) .WithDestinationKey(key) .withNewObjectMetadata(newObjectMetadata); s3.copyObject(copyObjectRequest);
/**
 * storeMimeType should rewrite the object's content type via an in-place copy:
 * same bucket and key for source and destination, with the new MIME type carried
 * in the replacement metadata.
 */
@Test
public void testStoreMimeType() throws BinaryStoreException {
    expect(s3Client.getObjectMetadata(BUCKET, TEST_KEY)).andReturn(new ObjectMetadata());
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    BinaryValue binaryValue = createBinaryValue(TEST_KEY, TEST_CONTENT);
    s3BinaryStore.storeMimeType(binaryValue, TEST_MIME);

    CopyObjectRequest request = copyRequestCapture.getValue();
    // The copy must target the same bucket/key pair (an in-place copy)...
    assertEquals(BUCKET, request.getSourceBucketName());
    assertEquals(BUCKET, request.getDestinationBucketName());
    assertEquals(TEST_KEY, request.getSourceKey());
    assertEquals(TEST_KEY, request.getDestinationKey());
    // ...with the new MIME type applied to the replacement metadata.
    assertEquals(TEST_MIME, request.getNewObjectMetadata().getContentType());
}
/**
 * Initiates an encrypted multipart upload for the destination of the given copy
 * request and returns the resulting upload id.
 */
private String initiateMultipartUpload(CopyObjectRequest origReq) {
    // Carry every relevant destination setting from the copy request over to the
    // multipart-initiate request.
    EncryptedInitiateMultipartUploadRequest initiateRequest =
            new EncryptedInitiateMultipartUploadRequest(
                    origReq.getDestinationBucketName(), origReq.getDestinationKey())
                    .withCannedACL(origReq.getCannedAccessControlList())
                    .withRequesterPays(origReq.isRequesterPays())
                    .withAccessControlList(origReq.getAccessControlList())
                    .withStorageClass(origReq.getStorageClass())
                    .withSSECustomerKey(origReq.getDestinationSSECustomerKey())
                    .withSSEAwsKeyManagementParams(origReq.getSSEAwsKeyManagementParams())
                    .withGeneralProgressListener(origReq.getGeneralProgressListener())
                    .withRequestMetricCollector(origReq.getRequestMetricCollector());
    // Reuse the source object's encryption material rather than creating new material.
    initiateRequest.setCreateEncryptionMaterial(false);

    ObjectMetadata destinationMetadata = origReq.getNewObjectMetadata();
    if (destinationMetadata == null) {
        destinationMetadata = new ObjectMetadata();
    }
    if (destinationMetadata.getContentType() == null) {
        // Fall back to the source object's content type when the caller supplied none.
        destinationMetadata.setContentType(metadata.getContentType());
    }
    initiateRequest.setObjectMetadata(destinationMetadata);
    populateMetadataWithEncryptionParams(metadata, destinationMetadata);

    String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();
    log.debug("Initiated new multipart upload: " + uploadId);
    return uploadId;
}
/**
 * When the copier options enable S3 server-side encryption, attaches metadata
 * requesting AES-256 SSE to the given copy request; otherwise leaves the request
 * untouched.
 */
private void applyObjectMetadata(CopyObjectRequest copyObjectRequest) {
    if (!s3s3CopierOptions.isS3ServerSideEncryption()) {
        return;
    }
    ObjectMetadata sseMetadata = new ObjectMetadata();
    sseMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    copyObjectRequest.setNewObjectMetadata(sseMetadata);
}
@Override protected void storeMimeType(BinaryValue binaryValue, String mimeType) throws BinaryStoreException { try { String key = binaryValue.getKey().toString(); ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key); metadata.setContentType(mimeType); // Update the object in place CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key); copyRequest.setNewObjectMetadata(metadata); s3Client.copyObject(copyRequest); } catch (AmazonClientException e) { throw new BinaryStoreException(e); } }
@Override public Transfer performTransfer(TransferManager transferManager) { // Create a copy request. CopyObjectRequest copyObjectRequest = new CopyObjectRequest(params.getSourceBucketName(), params.getSourceObjectKey(), params.getTargetBucketName(), params.getTargetObjectKey()); // If KMS Key ID is specified, set the AWS Key Management System parameters to be used to encrypt the object. if (StringUtils.isNotBlank(params.getKmsKeyId())) { copyObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(params.getKmsKeyId())); } // Otherwise, specify the server-side encryption algorithm for encrypting the object using AWS-managed keys. else { ObjectMetadata metadata = new ObjectMetadata(); metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); copyObjectRequest.setNewObjectMetadata(metadata); } return s3Operations.copyFile(copyObjectRequest, transferManager); } });
AmazonClientException { appendSingleObjectUserAgent(copyObjectRequest); assertParameterNotNull(copyObjectRequest.getSourceBucketName(), "The source bucket name must be specified when a copy request is initiated."); assertParameterNotNull(copyObjectRequest.getSourceKey(), "The source object key must be specified when a copy request is initiated."); assertParameterNotNull(copyObjectRequest.getDestinationBucketName(), "The destination bucket name must be specified when a copy request is initiated."); assertParameterNotNull(copyObjectRequest.getDestinationKey(), "The destination object key must be specified when a copy request is initiated."); assertParameterNotNull(srcS3, "The srcS3 parameter is mandatory"); "Copying object from " + copyObjectRequest.getSourceBucketName() + "/" + copyObjectRequest.getSourceKey() + " to " + copyObjectRequest.getDestinationBucketName() + "/" + copyObjectRequest.getDestinationKey(); copyObjectRequest.getSourceBucketName(), copyObjectRequest.getSourceKey()) .withSSECustomerKey(copyObjectRequest.getSourceSSECustomerKey()) .withRequesterPays(copyObjectRequest.isRequesterPays()) .withVersionId(copyObjectRequest.getSourceVersionId()); ObjectMetadata metadata = srcS3.getObjectMetadata(getObjectMetadataRequest); transferProgress.setTotalBytesToTransfer(metadata.getContentLength());
transferConfiguration.setMultipartCopyPartSize(partSize); TransferManager transfers = new TransferManager(s3); transfers.setConfiguration(transferConfiguration); ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey); final ObjectMetadata dstom = srcom.clone(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { dstom.setServerSideEncryption(serverSideEncryptionAlgorithm); CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey); copyObjectRequest.setCannedAccessControlList(cannedACL); copyObjectRequest.setNewObjectMetadata(dstom); Copy copy = transfers.copy(copyObjectRequest); copy.addProgressListener(progressListener); try {
"/" + SdkHttpUtils.urlEncode(copyObjectRequest.getSourceBucketName(), true) + "/" + SdkHttpUtils.urlEncode(copyObjectRequest.getSourceKey(), true); if (copyObjectRequest.getSourceVersionId() != null) { copySourceHeader += "?versionId=" + copyObjectRequest.getSourceVersionId(); addDateHeader(request, Headers.COPY_SOURCE_IF_MODIFIED_SINCE, copyObjectRequest.getModifiedSinceConstraint()); addDateHeader(request, Headers.COPY_SOURCE_IF_UNMODIFIED_SINCE, copyObjectRequest.getUnmodifiedSinceConstraint()); addStringListHeader(request, Headers.COPY_SOURCE_IF_MATCH, copyObjectRequest.getMatchingETagConstraints()); addStringListHeader(request, Headers.COPY_SOURCE_IF_NO_MATCH, copyObjectRequest.getNonmatchingETagConstraints()); if (copyObjectRequest.getAccessControlList() != null) { addAclHeaders(request, copyObjectRequest.getAccessControlList()); } else if (copyObjectRequest.getCannedAccessControlList() != null) { request.addHeader(Headers.S3_CANNED_ACL, copyObjectRequest.getCannedAccessControlList().toString()); if (copyObjectRequest.getStorageClass() != null) { request.addHeader(Headers.STORAGE_CLASS, copyObjectRequest.getStorageClass()); if (copyObjectRequest.getRedirectLocation() != null) { request.addHeader(Headers.REDIRECT_LOCATION, copyObjectRequest.getRedirectLocation()); populateRequesterPaysHeader(request, copyObjectRequest.isRequesterPays());
private void setS3ObjectUserProperty(BinaryKey binaryKey, String metadataKey, String metadataValue) throws BinaryStoreException { try { String key = binaryKey.toString(); ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key); Map<String, String> userMetadata = metadata.getUserMetadata(); if(null != metadataValue && metadataValue.equals(userMetadata.get(metadataKey))) { return; // The key/value pair already exists in user metadata, skip update } userMetadata.put(metadataKey, metadataValue); metadata.setUserMetadata(userMetadata); // Update the object in place CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key); copyRequest.setNewObjectMetadata(metadata); s3Client.copyObject(copyRequest); } catch (AmazonClientException e) { throw new BinaryStoreException(e); } }
@Override public void rename(String pathFrom, String pathTo) throws IOException { String[] bucketKeyFrom = splitPathToBucketAndKey(pathFrom, true); String[] bucketKeyTo = splitPathToBucketAndKey(pathTo, true); CopyObjectRequest copyRequest = new CopyObjectRequest(bucketKeyFrom[0], bucketKeyFrom[1], bucketKeyTo[0], bucketKeyTo[1]); this.amazonS3.copyObject(copyRequest); //Delete the source this.amazonS3.deleteObject(bucketKeyFrom[0], bucketKeyFrom[1]); }