@Override
public CopyResult call() throws Exception {
    try {
        final CopyResult copyResult = multipartCopyCallable.call();
        if (copyResult != null) {
            // Single-request copy finished synchronously; fire completion events.
            copyComplete();
            return copyResult;
        }
        // Multipart path: a null result means part-copy tasks were scheduled.
        // Collect the in-flight part futures first, then hand them to a
        // completion task that will stitch the parts together when they finish.
        futures.addAll(multipartCopyCallable.getFutures());
        futureReference.set(threadPool.submit(
                new CompleteMultipartCopy(multipartCopyCallable.getMultipartUploadId(),
                        s3, origReq, futures, listener, this)));
        return copyResult;
    } catch (CancellationException e) {
        // The transfer was cancelled; surface that as a client exception.
        transfer.setState(TransferState.Canceled);
        publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
        throw new SdkClientException("Upload canceled");
    } catch (Exception e) {
        transfer.setState(TransferState.Failed);
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
}
/**
 * Delivers a byte-count progress event of the given type to the listener.
 * Publication is skipped (returning {@code null}) when the listener is
 * absent or the NOOP listener, or when the byte count is not positive.
 */
private static Future<?> publishByteCountEvent(
        final ProgressListener listener,
        final ProgressEventType type,
        final long bytes) {
    if (listener == null || listener == ProgressListener.NOOP || bytes <= 0) {
        return null;
    }
    return deliverEvent(listener, new ProgressEvent(type, bytes));
}
/**
 * Publishes a response content-length event to the given listener.
 *
 * <p>Delegates to {@code publishByteCountEvent}, which silently skips
 * publication when {@code listener} is null/NOOP or {@code bytes} is not
 * positive.
 *
 * @param listener the listener to notify; may be null (publication skipped)
 * @param bytes    the response content length; must be positive to be published
 * @return the future of the delivery task, or null if publication was skipped
 */
public static Future<?> publishResponseContentLength(
        final ProgressListener listener, final long bytes) {
    return publishByteCountEvent(listener, RESPONSE_CONTENT_LENGTH_EVENT, bytes);
}
// NOTE(review): truncated excerpt — statements between "responseHandler" and the
// "+ httpResponse.getStatusCode()" concatenation are missing from this view, so the
// fragment does not compile as-is. Visible intent: parse the response Content-Length
// header and publish it (warning on parse failure), bracket response handling with
// HTTP_RESPONSE_STARTED/COMPLETED events, and build an error message from the HTTP
// status for a wrapping SdkClientException. Restore the elided code before editing.
try { long contentLength = Long.parseLong(s); publishResponseContentLength(listener, contentLength); } catch (NumberFormatException e) { log.warn("Cannot parse the Content-Length header of the response."); publishProgress(listener, ProgressEventType.HTTP_RESPONSE_STARTED_EVENT); try { awsResponse = responseHandler awsRequestMetrics.endEvent(Field.ResponseProcessingTime); publishProgress(listener, ProgressEventType.HTTP_RESPONSE_COMPLETED_EVENT); + httpResponse.getStatusCode() + ", Response Text: " + httpResponse.getStatusText(); throw new SdkClientException(errorMessage, e);
// NOTE(review): truncated excerpt of a putObject implementation — the try/catch
// structure, MD5 comparison, and several statements are missing from this view, so
// this does not compile as-is. Visible intent: default the metadata, validate the
// bucket name, tag the request with its operation name, propagate the read limit,
// set the canned-ACL header, and publish TRANSFER_STARTED / FAILED / COMPLETED
// events around the upload (failing on an MD5 integrity mismatch). Restore the
// elided code before editing.
if (metadata == null) metadata = new ObjectMetadata(); rejectNull(bucketName, "The bucket name parameter must be specified when uploading an object"); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutObject"); AmazonWebServiceRequest awsreq = request.getOriginalRequest(); awsreq.getRequestClientOptions() .setReadLimit(bufsize.intValue()); request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString()); publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT); publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); throw failure(t); publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); throw new SdkClientException( "Unable to verify integrity of data upload. " + "Client calculated content hash (contentMD5: " publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
// NOTE(review): truncated excerpt of uploadPart — the declarations of bucketName,
// key, uploadId, awsreq, isCurr and md5DigestStream, plus the closing brace, are
// missing from this view, so this does not compile as-is. Visible intent: validate
// the request, build a PUT Request tagged "UploadPart" with uploadId/partNumber
// parameters, propagate the read limit, publish TRANSFER_PART_STARTED, and delegate
// to doUploadPart. Restore the elided code before editing.
@Override public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws SdkClientException, AmazonServiceException { uploadPartRequest = beforeClientExecution(uploadPartRequest); rejectNull(uploadPartRequest, "The request parameter must be specified when uploading a part"); final File fileOrig = uploadPartRequest.getFile(); final int partNumber = uploadPartRequest.getPartNumber(); final long partSize = uploadPartRequest.getPartSize(); rejectNull(bucketName, "The bucket name parameter must be specified when uploading a part"); rejectNull(key, "The upload ID parameter must be specified when uploading a part"); Request<UploadPartRequest> request = createRequest(bucketName, key, uploadPartRequest, HttpMethodName.PUT); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "UploadPart"); request.addParameter("uploadId", uploadId); request.addParameter("partNumber", Integer.toString(partNumber)); awsreq.getRequestClientOptions() .setReadLimit(bufsize.intValue()); publishProgress(listener, ProgressEventType.TRANSFER_PART_STARTED_EVENT); return doUploadPart(bucketName, key, uploadId, partNumber, partSize, request, isCurr, md5DigestStream, listener);
// NOTE(review): truncated excerpt of getObject — the range[] declaration, the
// try/catch that the trailing CANCELED/FAILED publications belong to, and the
// method's closing brace are missing from this view, so this does not compile
// as-is. Visible intent: validate the request, build a GET Request tagged
// "GetObject" with versionId/partNumber/Range/SSE-C applied, then publish
// TRANSFER_STARTED and, on abort/failure paths, TRANSFER_CANCELED (returning
// null) or TRANSFER_FAILED (rethrowing). Restore the elided code before editing.
@Override public S3Object getObject(GetObjectRequest getObjectRequest) throws SdkClientException, AmazonServiceException { getObjectRequest = beforeClientExecution(getObjectRequest); assertNotNull(getObjectRequest, "GetObjectRequest"); assertStringNotEmpty(getObjectRequest.getBucketName(), "BucketName"); assertStringNotEmpty(getObjectRequest.getKey(), "Key"); Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET); request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetObject"); request.addParameter("versionId", getObjectRequest.getVersionId()); addPartNumberIfNotNull(request, getObjectRequest.getPartNumber()); request.addHeader(Headers.RANGE, "bytes=" + Long.toString(range[0]) + "-" + Long.toString(range[1])); populateSSE_C(request, getObjectRequest.getSSECustomerKey()); final ProgressListener listener = getObjectRequest.getGeneralProgressListener(); publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT); publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT); return null; publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); throw ase;
// NOTE(review): truncated excerpt from the HTTP execution/retry loop — the actual
// request dispatch between HTTP_REQUEST_STARTED and HTTP_REQUEST_COMPLETED, and
// the retry control flow, are missing from this view. Visible intent: bracket the
// wire call with request started/completed progress events, record retry-capacity
// metrics, read the status line defensively (status -1 if absent), and on a
// detected clock-skew error adjust the SDK-global and per-request time offset
// before retrying. Restore the elided code before editing.
request.setTimeOffset(timeOffset); publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT); awsRequestMetrics.startEvent(Field.HttpRequestTime); awsRequestMetrics.setCounter(Field.RetryCapacityConsumed, retryCapacity.consumedCapacity()); publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT); final StatusLine statusLine = execOneParams.apacheResponse.getStatusLine(); final int statusCode = statusLine == null ? -1 : statusLine.getStatusCode(); int clockSkew = parseClockSkewOffset(execOneParams.apacheResponse, exception); SDKGlobalTime.setGlobalTimeOffset(timeOffset = clockSkew); request.setTimeOffset(timeOffset); // adjust time offset for the retry
// NOTE(review): truncated excerpt — this starts mid-parameter-list (the method
// name and earlier parameters are outside this view) and the MD5-comparison
// statements feeding the integrity-failure branch are elided, so it does not
// compile as-is. Visible intent: set the part content on the request, invoke it,
// verify upload integrity via the returned ETag vs. a client-side MD5, publish
// TRANSFER_PART_COMPLETED on success, and on any Throwable publish
// TRANSFER_PART_FAILED (plus COMPLETED, as written) before rethrowing via
// failure(t). Restore the elided code before editing.
final ProgressListener listener) { try { request.setContent(inputStream); ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key); final String etag = metadata.getETag(); + ", partNumber: " + partNumber + ", partSize: " + partSize; throw new SdkClientException( "Unable to verify integrity of data upload. " + "Client calculated content hash (contentMD5: " publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT); UploadPartResult result = new UploadPartResult(); result.setETag(etag); return result; } catch (Throwable t) { publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT); publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT); throw failure(t);
// NOTE(review): truncated excerpt of the client-request execute wrapper — the
// declarations of notCloseable/listener/response and the remainder of the catch
// and finally blocks are missing from this view. Visible intent: merge configured
// and custom headers into the request, wrap the request content for progress
// tracking, bracket executeHelper() with CLIENT_REQUEST_STARTED / SUCCESS /
// FAILED events, and close out request metrics timing. Restore the elided code
// before editing.
request.getHeaders().putAll(config.getHeaders()); request.getHeaders().putAll(requestConfig.getCustomRequestHeaders()); final InputStream origContent = request.getContent(); final InputStream toBeClosed = beforeRequest(); // for progress tracking request.setContent(notCloseable); try { publishProgress(listener, ProgressEventType.CLIENT_REQUEST_STARTED_EVENT); response = executeHelper(); publishProgress(listener, ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT); awsRequestMetrics.endEvent(AwsClientSideMonitoringMetrics.ApiCallLatency); awsRequestMetrics.getTimingInfo().endTiming(); return response; } catch (AmazonClientException e) { publishProgress(listener, ProgressEventType.CLIENT_REQUEST_FAILED_EVENT);
/**
 * Reports the request's Content-Length header, when present and numeric,
 * to the given progress listener. A malformed header is logged and ignored.
 *
 * @param listener listener to notify
 */
private void reportContentLength(ProgressListener listener) {
    final String rawContentLength = request.getHeaders().get("Content-Length");
    if (rawContentLength == null) {
        return;
    }
    try {
        publishRequestContentLength(listener, Long.parseLong(rawContentLength));
    } catch (NumberFormatException e) {
        log.warn("Cannot parse the Content-Length header of the request.");
    }
}
// NOTE(review): truncated excerpt of a chunked archive download — the enclosing
// try blocks, the per-chunk download call, and loop bookkeeping are missing from
// this view, so this does not compile as-is. Visible intent: parse a customized
// chunk size (MiB -> bytes), open the output file for random access, publish
// TRANSFER_STARTED, loop over the archive downloading chunk ranges, publish
// TRANSFER_FAILED (rethrowing) on any error and TRANSFER_COMPLETED on success,
// closing the file quietly in the finally block. Restore the elided code before
// editing.
chunkSize = Long.parseLong(customizedChunkSize) * 1024 * 1024; } catch (NumberFormatException e) { publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); throw new AmazonClientException("Invalid chunk size: " + e.getMessage()); output = new RandomAccessFile(file, "rw"); } catch (FileNotFoundException e) { publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); throw new AmazonClientException("Unable to open the output file " + file.getPath(), e); publishProgress(progressListener, ProgressEventType.TRANSFER_STARTED_EVENT); while (currentPosition < archiveSize) { if (currentPosition + chunkSize > archiveSize) { currentPosition, endPosition, progressListener); } catch (Throwable t) { publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT); throw failure(t); publishProgress(progressListener, ProgressEventType.TRANSFER_COMPLETED_EVENT); } finally { closeQuietly(output, log);
/**
 * Publishes a progress event of the given type to this transfer's listener
 * chain.
 *
 * @param eventType the type of progress event to publish
 */
protected void fireProgressEvent(final ProgressEventType eventType) { publishProgress(listenerChain, eventType); }
@Override
public UploadResult call() throws Exception {
    final CompleteMultipartUploadResult completeResult;
    try {
        // Ask S3 to assemble the previously uploaded parts into the final object.
        completeResult = s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(
                        origReq.getBucketName(), origReq.getKey(), uploadId, collectPartETags())
                    .withRequesterPays(origReq.isRequesterPays())
                    .withGeneralProgressListener(origReq.getGeneralProgressListener())
                    .withRequestMetricCollector(origReq.getRequestMetricCollector()));
    } catch (Exception e) {
        // Record the failure on the monitor and notify listeners before rethrowing.
        monitor.uploadFailure();
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }
    final UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(completeResult.getETag());
    uploadResult.setVersionId(completeResult.getVersionId());
    monitor.uploadComplete();
    return uploadResult;
}
/**
 * Publishes a response bytes-discarded (reset) event to the given listener.
 *
 * @param listener       the listener to notify
 * @param bytesDiscarded the number of response bytes discarded
 * @return the future of the delivery task, or null if publication was skipped
 */
public static Future<?> publishResponseBytesDiscarded(
        final ProgressListener listener,
        final long bytesDiscarded) {
    return publishResetEvent(listener,
            ProgressEventType.RESPONSE_BYTE_DISCARD_EVENT, bytesDiscarded);
}
@Override
protected void onNotifyBytesRead() {
    // Report the bytes read since the last notification as request progress.
    final ProgressListener chain = getListener();
    final long pendingBytes = getUnnotifiedByteCount();
    publishRequestBytesTransferred(chain, pendingBytes);
} }
@Override
protected void onNotifyBytesRead() {
    // Report the bytes read since the last notification as response progress.
    final ProgressListener chain = getListener();
    final long pendingBytes = getUnnotifiedByteCount();
    publishResponseBytesTransferred(chain, pendingBytes);
} }
@Override
protected void onReset() {
    // On stream reset, roll back all previously reported request bytes.
    final ProgressListener chain = getListener();
    final long reportedBytes = getNotifiedByteCount();
    publishRequestReset(chain, reportedBytes);
}
@Override
protected void onReset() {
    // On stream reset, roll back all previously reported response bytes.
    final ProgressListener chain = getListener();
    final long reportedBytes = getNotifiedByteCount();
    publishResponseReset(chain, reportedBytes);
}
// NOTE(review): truncated excerpt of a putObject variant — the try/catch
// structure, the MD5 comparison feeding the integrity-failure branch, and
// several statements are missing from this view, so this does not compile
// as-is. Visible intent: default the metadata, validate the bucket name,
// propagate the read limit, set canned-ACL and storage-class headers, and
// publish TRANSFER_STARTED / FAILED / COMPLETED events around the upload.
// Restore the elided code before editing.
if (metadata == null) metadata = new ObjectMetadata(); rejectNull(bucketName, "The bucket name parameter must be specified when uploading an object"); AmazonWebServiceRequest awsreq = request.getOriginalRequest(); awsreq.getRequestClientOptions() .setReadLimit(bufsize.intValue()); request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString()); request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass()); publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT); publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); throw failure(t); publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT); throw new SdkClientException( "Unable to verify integrity of data upload. " + "Client calculated content hash (contentMD5: " publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);