/**
 * Determines the logical size of an S3 object, preferring the unencrypted
 * length recorded in the object's user metadata when present.
 *
 * @param path the object path, used only for error reporting
 * @param metadata the object's metadata as returned by S3
 * @return the unencrypted content length when recorded, otherwise the raw content length
 * @throws IOException if the object is encrypted but carries no unencrypted-length header
 */
private static long getObjectSize(Path path, ObjectMetadata metadata) throws IOException {
    Map<String, String> userMetadata = metadata.getUserMetadata();
    String unencryptedLength = userMetadata.get(UNENCRYPTED_CONTENT_LENGTH);
    boolean encrypted = userMetadata.containsKey(SERVER_SIDE_ENCRYPTION);
    if (encrypted && unencryptedLength == null) {
        throw new IOException(format("%s header is not set on an encrypted object: %s", UNENCRYPTED_CONTENT_LENGTH, path));
    }
    if (unencryptedLength == null) {
        return metadata.getContentLength();
    }
    return Long.parseLong(unencryptedLength);
}
/** * Returns the physical length of the entire object stored in S3. * This is useful during, for example, a range get operation. */ public long getInstanceLength() { // See Content-Range in // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html String contentRange = (String)metadata.get(Headers.CONTENT_RANGE); if (contentRange != null) { int pos = contentRange.lastIndexOf("/"); if (pos >= 0) return Long.parseLong(contentRange.substring(pos+1)); } return getContentLength(); }
/**
 * Returns true if this CopyCallable is processing a multi-part copy.
 *
 * @return True if this CopyCallable is processing a multi-part copy.
 */
public boolean isMultipartCopy() {
    long objectSize = metadata.getContentLength();
    long threshold = configuration.getMultipartCopyThreshold();
    return objectSize > threshold;
}
public static boolean isDirectoryPlaceholder(String key, ObjectMetadata objectMetadata) { // Recognize "standard" directory place-holder indications used by // Amazon's AWS Console and Panic's Transmit. if (key.endsWith("/") && objectMetadata.getContentLength() == 0) { return true; } // Recognize s3sync.rb directory placeholders by MD5/ETag value. if ("d66759af42f282e1ba19144df2d405d0".equals(objectMetadata.getETag())) { return true; } // Recognize place-holder objects created by the Google Storage console // or S3 Organizer Firefox extension. if (key.endsWith("_$folder$") && objectMetadata.getContentLength() == 0) { return true; } // We don't use JetS3t APIs anymore, but the below check is still needed for backward compatibility. // Recognize legacy JetS3t directory place-holder objects, only gives // accurate results if an object's metadata is populated. if (objectMetadata.getContentLength() == 0 && MIMETYPE_JETS3T_DIRECTORY.equals(objectMetadata.getContentType())) { return true; } return false; }
/**
 * Returns the content length, or size, of this object's data, or 0 if it is unknown.
 *
 * @param bucket
 *            the bucket containing the object.
 * @param objectKey
 *            the key identifying the object.
 * @return the content length, or size, of this object's data, or 0 if it is unknown
 * @throws SdkClientException
 * @deprecated the method name is misspelled; use
 *             {@link #getS3ObjectContentLength(Bucket, String)} instead.
 */
@Deprecated
public long getS3ObjectContentLenght( Bucket bucket, String objectKey ) throws SdkClientException {
    return getS3ObjectContentLength( bucket, objectKey );
}

/**
 * Returns the content length, or size, of this object's data, or 0 if it is unknown.
 *
 * @param bucket
 *            the bucket containing the object.
 * @param objectKey
 *            the key identifying the object.
 * @return the content length, or size, of this object's data, or 0 if it is unknown
 * @throws SdkClientException
 */
public long getS3ObjectContentLength( Bucket bucket, String objectKey ) throws SdkClientException {
    return getS3ObjectDetails( bucket, objectKey ).getContentLength();
}
}
/**
 * Attempts to commit a previously-recovered multi-part upload (MPU).
 *
 * <p>If the commit call fails, the method checks whether an object with the
 * expected name already exists: a matching length is treated as an earlier
 * successful commit, a mismatched length or a missing object is surfaced as
 * an {@link IOException} (with the original failure as its cause).
 *
 * @throws IOException if the commit fails and the object is absent or has a
 *         conflicting length
 */
@Override
public void commitAfterRecovery() throws IOException {
    if (totalLength > 0L) {
        LOG.info("Trying to commit after recovery {} with MPU ID {}", objectName, uploadId);
        try {
            s3AccessHelper.commitMultiPartUpload(objectName, uploadId, parts, totalLength, new AtomicInteger());
        } catch (IOException e) {
            // The commit may have already succeeded in a previous attempt;
            // probe the object's metadata before deciding this is fatal.
            LOG.info("Failed to commit after recovery {} with MPU ID {}. " + "Checking if file was committed before...", objectName, uploadId);
            LOG.trace("Exception when committing:", e);
            try {
                ObjectMetadata metadata = s3AccessHelper.getObjectMetadata(objectName);
                // Object exists but with a different size: a conflicting commit.
                if (totalLength != metadata.getContentLength()) {
                    String message = String.format("Inconsistent result for object %s: conflicting lengths. " + "Recovered committer for upload %s indicates %s bytes, present object is %s bytes", objectName, uploadId, totalLength, metadata.getContentLength());
                    LOG.warn(message);
                    throw new IOException(message, e);
                }
                // Lengths match: assume the earlier commit succeeded; swallow the failure.
            } catch (FileNotFoundException fnf) {
                // No object and the MPU commit failed: the upload is unrecoverable.
                LOG.warn("Object {} not existing after failed recovery commit with MPU ID {}", objectName, uploadId);
                throw new IOException(String.format("Recovering commit failed for object %s. " + "Object does not exist and MultiPart Upload %s is not valid.", objectName, uploadId), e);
            }
        }
    } else {
        // Nothing was ever written; there is nothing to commit.
        LOG.debug("No data to commit for file: {}", objectName);
    }
}
/**
 * Opens a stream over the S3 object, honoring the configured offset.
 * A positive in-range offset starts reading from that byte; a negative
 * in-range offset reads the trailing |offset| bytes; anything else reads
 * from the beginning.
 *
 * @return the object content stream for the computed byte range
 * @throws IOException if the underlying S3 request fails
 */
@Override
public InputStream openStream() throws IOException {
    try {
        final long length = objectMetadata.getContentLength();
        final long end = length - 1;
        final long start;
        if (offset > 0 && offset < length) {
            start = offset;
        } else if (offset < 0 && (-1 * offset) < length) {
            start = length + offset;
        } else {
            start = 0;
        }
        final GetObjectRequest request =
            new GetObjectRequest(config.getS3Bucket(), taskKey)
                .withMatchingETagConstraint(objectMetadata.getETag())
                .withRange(start, end);
        return service.getObject(request).getObjectContent();
    } catch (AmazonServiceException e) {
        throw new IOException(e);
    }
}
}
/**
 * Looks up the size of a remote object via its S3 metadata.
 *
 * @param remotePath path of the object within the shard
 * @return the object's content length in bytes
 * @throws BackupRestoreException declared by the interface; not thrown here
 */
@Override
public long getFileSize(Path remotePath) throws BackupRestoreException {
    final String key = remotePath.toString();
    return s3Client.getObjectMetadata(getShard(), key).getContentLength();
}
/**
 * Returns the size of the data in this request, otherwise -1 if the content
 * length is unknown.
 *
 * @param putObjectRequest
 *            The request to check.
 *
 * @return The size of the data in this request, otherwise -1 if the size of
 *         the data is unknown.
 */
public static long getContentLength(PutObjectRequest putObjectRequest) {
    File file = getRequestFile(putObjectRequest);
    if (file != null) {
        return file.length();
    }
    // No file: fall back to the content length declared on the metadata,
    // but only when an input stream is actually present.
    if (putObjectRequest.getInputStream() != null) {
        long declared = putObjectRequest.getMetadata().getContentLength();
        if (declared > 0) {
            return declared;
        }
    }
    return -1;
}
@Override @Nullable protected ObjectStatus getObjectStatus(String key) throws IOException { try { ObjectMetadata meta = mClient.getObjectMetadata(mBucketName, key); return new ObjectStatus(key, meta.getETag(), meta.getContentLength(), meta.getLastModified().getTime()); } catch (AmazonServiceException e) { if (e.getStatusCode() == 404) { // file not found, possible for exists calls return null; } throw new IOException(e); } catch (AmazonClientException e) { throw new IOException(e); } }
/**
 * Returns the plaintext length from the request and metadata; or -1 if
 * unknown.
 */
protected final long plaintextLength(AbstractPutObjectRequest request, ObjectMetadata metadata) {
    File file = request.getFile();
    if (file != null) {
        return file.length();
    }
    // Without a file, trust the declared Content-Length only when a stream
    // is present and the header was explicitly set.
    boolean hasStream = request.getInputStream() != null;
    if (hasStream && metadata.getRawMetadataValue(Headers.CONTENT_LENGTH) != null) {
        return metadata.getContentLength();
    }
    return -1;
}
/**
 * Performs the copy of an Amazon S3 object from source bucket to
 * destination bucket as multiple copy part requests. The information about
 * the part to be copied is specified in the request as a byte range
 * (first-last)
 *
 * @throws Exception
 *             Any Exception that occurs while carrying out the request.
 */
private void copyInParts() throws Exception {
    multipartUploadId = initiateMultipartUpload(copyObjectRequest);
    final long totalSize = metadata.getContentLength();
    final long partSize = getOptimalPartSize(totalSize);
    try {
        CopyPartRequestFactory requestFactory =
            new CopyPartRequestFactory(copyObjectRequest, multipartUploadId, partSize, totalSize);
        copyPartsInParallel(requestFactory);
    } catch (Exception e) {
        // Notify listeners, then abort the dangling multipart upload before rethrowing.
        publishProgress(listenerChain, ProgressEventType.TRANSFER_FAILED_EVENT);
        abortMultipartCopy();
        throw new RuntimeException("Unable to perform multipart copy", e);
    }
}
/**
 * Returns {@code true} if mapping presents for the provided key.
 *
 * @param key Key to check mapping for.
 * @return {@code true} if mapping presents for key.
 * @throws AmazonClientException If an error occurs while querying Amazon S3.
 */
boolean hasKey(String key) throws AmazonClientException {
    assert !F.isEmpty(key);
    try {
        // NOTE(review): a zero-length object is treated as "no mapping" here.
        long len = s3.getObjectMetadata(bucketName, key).getContentLength();
        return len != 0;
    } catch (AmazonServiceException e) {
        // Anything other than "not found" is propagated to the caller.
        if (e.getStatusCode() != 404) {
            throw e;
        }
        return false;
    }
}
/**
 * Returns the part size of the part
 *
 * @param getObjectRequest the request to check
 * @param s3 the s3 client
 * @param partNumber the part number
 * @return the part size
 */
@SdkInternalApi
public static long getPartSize(GetObjectRequest getObjectRequest, AmazonS3 s3, int partNumber) {
    ValidationUtils.assertNotNull(s3, "S3 client");
    ValidationUtils.assertNotNull(getObjectRequest, "GetObjectRequest");
    // Query the metadata for just this part; its content length is the part size.
    GetObjectMetadataRequest metadataRequest =
        RequestCopyUtils.createGetObjectMetadataRequestFrom(getObjectRequest)
            .withPartNumber(partNumber);
    ObjectMetadata partMetadata = s3.getObjectMetadata(metadataRequest);
    return partMetadata.getContentLength();
}
/**
 * Retrieves the configuration object's metadata from S3.
 *
 * @return the metadata when the object exists and is non-empty, otherwise {@code null}
 * @throws Exception if the S3 request fails for any reason other than "not found"
 */
private ObjectMetadata getConfigMetadata() throws Exception {
    try {
        ObjectMetadata metadata = s3Client.getObjectMetadata(arguments.getBucket(), arguments.getKey());
        // An empty object is treated the same as a missing one.
        return (metadata.getContentLength() > 0) ? metadata : null;
    } catch ( AmazonS3Exception e ) {
        if ( isNotFoundError(e) ) {
            return null;
        }
        throw e;
    }
}
try {
    S3Object object = getS3ObjectAndMetadata(bucket, key, ssecLocal);
    // NOTE(review): narrowing cast — an object larger than Integer.MAX_VALUE
    // bytes would overflow here; confirm sizes are bounded upstream.
    int sizeOfFile = (int)object.getObjectMetadata().getContentLength();
    // Number of fixed-size records in the file (integer division truncates).
    fieldCount = sizeOfFile/sizeArray;
    totalSize = sizeOfFile;
/**
 * Retrieves the configuration object from S3.
 *
 * @return the object when it exists and is non-empty, otherwise {@code null}
 * @throws Exception if the S3 request fails for any reason other than "not found"
 */
private S3Object getConfigObject() throws Exception {
    try {
        S3Object object = s3Client.getObject(arguments.getBucket(), arguments.getKey());
        if ( object.getObjectMetadata().getContentLength() > 0 ) {
            return object;
        }
        // Fix: an empty object was previously dropped without being closed,
        // leaking the underlying HTTP connection held by its content stream.
        object.close();
    } catch ( AmazonS3Exception e ) {
        if ( !isNotFoundError(e) ) {
            throw e;
        }
    }
    return null;
}
    // Caller supplied an explicit range: honor its end byte.
    lastByte = range[1];
} else {
    // No end byte specified: read through the final byte of the object.
    lastByte = objectMetadata.getContentLength() - 1;
// Fragment of a larger expression: the surrounding call (outside this view)
// supplies the remaining arguments.
s3Object.getObjectMetadata().getContentLength(), // expected length
// Wrap the raw stream so the in-flight HTTP request can be aborted.
new S3AbortableInputStream(is, httpRequest, s3Object.getObjectMetadata().getContentLength());
// NOTE(review): the 'false' flag's meaning isn't visible here — confirm
// against S3ObjectInputStream's constructor before relying on it.
s3Object.setObjectContent(new S3ObjectInputStream(abortableInputStream, httpRequest, false));
return s3Object;
// Record the object's full size as the total number of bytes this transfer will move.
transferProgress.setTotalBytesToTransfer(metadata.getContentLength());