// Fragment reconstructed for readability: the original snippet starts mid-call,
// so the "bucketName" argument and the assignment are assumptions.
PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key,
    new SafeSliceInputStream(new BasicSliceInput(buffer.slice())), objectMetadata);
putObjectRequest.getRequestClientOptions().setReadLimit(bulkSize);
PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
if (outputMetaData.getSize() < Integer.MAX_VALUE) {
  // The read limit is an int, so the full object size can only be used as the
  // limit when it fits below Integer.MAX_VALUE.
  request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
} else {
  throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
}
private Transfer startTransfer(Mapper.Context context, S3UploadDescriptor uploadDescriptor) throws IOException {
  InputStream input = getInputStream(uploadDescriptor.getSource(), context.getConfiguration());

  int bufferSize = context.getConfiguration().getInt(ConfigurationVariable.UPLOAD_BUFFER_SIZE.getName(), -1);
  if (bufferSize <= 0) {
    // The default value is the same value used by FileSystem to configure the InputStream.
    // See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml
    bufferSize = context.getConfiguration().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  }
  LOG.info("Buffer of the input stream is {} for file {}", bufferSize, uploadDescriptor.getSource());

  // The input stream should not be closed here; the transfer manager will do it.
  input = new BufferedInputStream(input, bufferSize);
  try {
    PutObjectRequest request = new PutObjectRequest(uploadDescriptor.getBucketName(), uploadDescriptor.getKey(),
        input, uploadDescriptor.getMetadata());

    String cannedAcl = context.getConfiguration().get(ConfigurationVariable.CANNED_ACL.getName());
    if (cannedAcl != null) {
      CannedAccessControlList acl = CannedAclUtils.toCannedAccessControlList(cannedAcl);
      LOG.debug("Using CannedACL {}", acl.name());
      request.withCannedAcl(acl);
    }

    // We add 1 to the buffer size as per the com.amazonaws.RequestClientOptions doc.
    request.getRequestClientOptions().setReadLimit(bufferSize + 1);
    return transferManager.upload(request);
  } catch (AmazonClientException e) {
    throw new CopyReadException(e);
  }
}
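For context, readLimit matters because the SDK calls mark(readLimit) on the request stream before transmission and reset() if the request is retried; reading past the limit makes the retry fail with a ResetException. Below is a minimal, self-contained sketch of the pattern the snippets above rely on. The bucket, key, file path, and buffer size are hypothetical, and the client setup assumes the AWS SDK for Java v1 (AmazonS3ClientBuilder).

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ReadLimitSketch {

  public static void main(String[] args) throws Exception {
    // Hypothetical names; substitute your own bucket, key and file.
    String bucket = "example-bucket";
    String key = "example-key";
    File file = new File("/tmp/example.dat");
    int bufferSize = 128 * 1024; // assumed buffer size

    // Declaring the content length up front stops the SDK from buffering the
    // whole stream just to compute it.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(file.length());

    // BufferedInputStream supports mark/reset, which the SDK uses to rewind
    // the stream when a request is retried.
    InputStream input = new BufferedInputStream(new FileInputStream(file), bufferSize);

    PutObjectRequest request = new PutObjectRequest(bucket, key, input, metadata);
    // The SDK calls mark(readLimit) before sending and reset() on retry; if
    // more than readLimit bytes were consumed, reset() throws. Hence the
    // limit is set one byte past the buffer size, per the
    // com.amazonaws.RequestClientOptions Javadoc.
    request.getRequestClientOptions().setReadLimit(bufferSize + 1);

    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    s3.putObject(request);
  }
}

When the source is a local file, passing the File directly to PutObjectRequest is usually simpler: the SDK can then reopen the file on retry, and no read limit needs to be tuned at all.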