@Override
protected void logout() throws BackgroundException {
    try {
        // Release the client's underlying connection resources on session teardown
        client.shutdown();
    }
    catch(ServiceException e) {
        // Translate the JetS3t failure into the application's BackgroundException hierarchy
        throw new S3ExceptionMappingService().map(e);
    }
}
@Override public List<MultipartPart> list(final MultipartUpload multipart) throws BackgroundException { if(log.isInfoEnabled()) { log.info(String.format("List completed parts of %s", multipart.getUploadId())); } // This operation lists the parts that have been uploaded for a specific multipart upload. try { return session.getClient().multipartListParts(multipart); } catch(S3ServiceException e) { throw new S3ExceptionMappingService().map(MessageFormat.format("Upload {0} failed", multipart.getObjectKey()), e); } }
@Override
public StorageObject call(final AbstractHttpEntity entity) throws BackgroundException {
    try {
        final RequestEntityRestStorageService client = session.getClient();
        // Stream the request entity to the service for the captured target object;
        // request parameters come from the transfer status (e.g. upload options)
        client.putObjectWithRequestEntityImpl(
            containerService.getContainer(file).getName(), object, entity, status.getParameters());
        if(log.isDebugEnabled()) {
            log.debug(String.format("Saved object %s with checksum %s", file, object.getETag()));
        }
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    // Return the object populated by the PUT response (e.g. ETag)
    return object;
}
/**
 * Abort a pending multipart upload, discarding any parts already transferred.
 *
 * @param upload Pending multipart upload to abort
 * @throws BackgroundException When the abort request fails
 */
@Override
public void delete(final MultipartUpload upload) throws BackgroundException {
    if(log.isInfoEnabled()) {
        log.info(String.format("Delete multipart upload %s", upload.getUploadId()));
    }
    try {
        session.getClient().multipartAbortUpload(upload);
    }
    catch(S3ServiceException e) {
        // Reconstruct a Path for the failed object so the error message can reference it
        final Path bucket = new Path(PathNormalizer.normalize(upload.getBucketName()), EnumSet.of(Path.Type.directory));
        final Path file = new Path(bucket, upload.getObjectKey(), EnumSet.of(Path.Type.file));
        throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
    }
}
}
protected String copy(final Path source, final S3Object destination, final TransferStatus status) throws BackgroundException {
    try {
        // Copying object applying the metadata of the original
        // (final boolean argument false — presumably "do not replace metadata"; verify against JetS3t API)
        final Map<String, Object> stringObjectMap = session.getClient().copyVersionedObject(source.attributes().getVersionId(), containerService.getContainer(source).getName(), containerService.getKey(source), destination.getBucketName(), destination, false);
        // Response map holds the complete metadata under a well-known key; raw Map per JetS3t API
        final Map complete = (Map) stringObjectMap.get(Constants.KEY_FOR_COMPLETE_METADATA);
        // Return the version ID assigned to the copy, if versioning is enabled on the target
        return (String) complete.get(Constants.AMZ_VERSION_ID);
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
    }
}
throw new S3ExceptionMappingService().map("Cannot read bucket versioning status", e);
/**
 * Query whether S3 Transfer Acceleration is enabled on the bucket containing the given file.
 *
 * @param file File or container to resolve the bucket from
 * @return True when the bucket's accelerate configuration reports enabled
 * @throws BackgroundException When reading the configuration fails
 */
@Override
public boolean getStatus(final Path file) throws BackgroundException {
    final Path bucket = containerService.getContainer(file);
    try {
        return session.getClient()
            .getAccelerateConfig(bucket.getName())
            .isEnabled();
    }
    catch(S3ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, bucket);
    }
}
@Override
public MultipartPart call() throws BackgroundException {
    try {
        // Byte range of the source object this part covers
        final HttpRange range = HttpRange.byLength(offset, length);
        // Server-side copy of the range into the multipart upload; the four nulls are
        // the unused conditional-copy headers (modified/unmodified since, match/none-match)
        final MultipartPart part = session.getClient().multipartUploadPartCopy(multipart, partNumber, containerService.getContainer(source).getName(), containerService.getKey(source), null, null, null, null, range.getStart(), range.getEnd(), source.attributes().getVersionId());
        if(log.isInfoEnabled()) {
            log.info(String.format("Received response %s for part number %d", part, partNumber));
        }
        // Populate part with response data that is accessible via the object's metadata;
        // substitute defaults when the service omitted last-modified or ETag
        return new MultipartPart(partNumber, null == part.getLastModified() ? new Date(System.currentTimeMillis()) : part.getLastModified(), null == part.getEtag() ? StringUtils.EMPTY : part.getEtag(), part.getSize());
    }
    catch(S3ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
    }
}
});
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
/**
 * Forces a full reclaim of all caches, and waits until the reclaim completes. Cache contents that
 * need to be retained because they are a part of an active job are retained. Any cache contents
 * that can be reclaimed will be. This operation may take a very long time to complete, depending
 * on how much of the cache can be reclaimed and how many blobs the cache is managing.
 *
 * @throws BackgroundException When cancelling jobs or the reclaim request fails
 */
protected void clear() throws BackgroundException {
    try {
        // Cancel all active jobs to remove references to cached objects
        final Ds3Client ds3Client = new SpectraClientBuilder().wrap(session.getClient(), session.getHost());
        ds3Client.cancelAllActiveJobsSpectraS3(new CancelAllActiveJobsSpectraS3Request());
        // Clear cache
        final RequestEntityRestStorageService client = session.getClient();
        // The reclaim endpoint is a Spectra-specific REST resource, addressed directly by URL
        final HttpPut request = new HttpPut(String.format("%s://%s:%s/_rest_/cache_filesystem?reclaim", session.getHost().getProtocol().getScheme(), session.getHost().getHostname(), session.getHost().getPort()));
        // Sign the raw request with the session credentials before executing it
        client.authorizeHttpRequest(request, null, null);
        final HttpResponse response = client.getHttpClient().execute(request);
        // A successful reclaim returns 204 No Content; anything else is an error
        if(HttpStatus.SC_NO_CONTENT != response.getStatusLine().getStatusCode()) {
            throw new HttpResponseException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase());
        }
    }
    catch(HttpResponseException e) {
        throw new HttpResponseExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map(e);
    }
}
}
/**
 * Enable or disable S3 Transfer Acceleration for the bucket containing the given file.
 *
 * @param file    File or container to resolve the bucket from
 * @param enabled Desired acceleration state
 * @throws BackgroundException When the bucket name is not DNS-compliant or the request fails
 */
@Override
public void setStatus(final Path file, final boolean enabled) throws BackgroundException {
    final Path bucket = containerService.getContainer(file);
    try {
        // Transfer Acceleration endpoints embed the bucket name in the hostname
        if(!ServiceUtils.isBucketNameValidDNSName(bucket.getName())) {
            throw new InteroperabilityException("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods.");
        }
        session.getClient().setAccelerateConfig(bucket.getName(), new AccelerateConfig(enabled));
    }
    catch(S3ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, bucket);
    }
}
@Override
public HttpResponseOutputStream<VersionId> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    // Build the object metadata for the upload without multipart support in the helper
    final S3Object object = new S3WriteFeature(session, new S3DisabledMultipartService())
        .getDetails(file, status);
    // ID for the initiated multipart upload.
    final MultipartUpload multipart;
    try {
        multipart = session.getClient().multipartStartUpload(
            containerService.getContainer(file).getName(), object);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s",
                multipart.getObjectKey(), multipart.getUploadId()));
        }
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    final MultipartOutputStream proxy = new MultipartOutputStream(multipart, file, status);
    // Buffer writes in memory so each part meets the configured minimum part size
    return new HttpResponseOutputStream<VersionId>(new MemorySegementingOutputStream(proxy,
        preferences.getInteger("s3.upload.multipart.partsize.minimum"))) {
        @Override
        public VersionId getStatus() {
            // Version ID is only known after the multipart upload has completed
            return proxy.getVersionId();
        }
    };
}
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final HttpRange range = HttpRange.withStatus(status);
        final RequestEntityRestStorageService client = session.getClient();
        final S3Object object = client.getVersionedObject(
            file.attributes().getVersionId(),
            containerService.getContainer(file).getName(),
            containerService.getKey(file),
            null, // ifModifiedSince
            null, // ifUnmodifiedSince
            null, // ifMatch
            null, // ifNoneMatch
            // Only send a byte range when resuming an interrupted transfer
            status.isAppend() ? range.getStart() : null,
            // An open-ended range (end == -1) is expressed by omitting the end offset
            status.isAppend() ? (range.getEnd() == -1 ? null : range.getEnd()) : null);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Reading stream with content length %d", object.getContentLength()));
        }
        return object.getDataInputStream();
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Download {0} failed", e, file);
    }
}
@Override public void write(final Path container, final Distribution distribution, final LoginCallback prompt) throws BackgroundException { if(distribution.getMethod().equals(Distribution.WEBSITE)) { try { if(distribution.isEnabled()) { String suffix = "index.html"; if(StringUtils.isNotBlank(distribution.getIndexDocument())) { suffix = PathNormalizer.name(distribution.getIndexDocument()); } // Enable website endpoint session.getClient().setWebsiteConfig(container.getName(), new S3WebsiteConfig(suffix)); } else { // Disable website endpoint session.getClient().deleteWebsiteConfig(container.getName()); } } catch(S3ServiceException e) { throw new S3ExceptionMappingService().map("Cannot write website configuration", e); } } else { super.write(container, distribution, prompt); } }
public void create(final Path bucket, final String location) throws BackgroundException { // Create bucket if(!ServiceUtils.isBucketNameValidDNSName(bucket.getName())) { throw new InteroperabilityException(LocaleFactory.localizedString("Bucket name is not DNS compatible", "S3")); } AccessControlList acl; if(PreferencesFactory.get().getProperty("s3.bucket.acl.default").equals("public-read")) { acl = GSAccessControlList.REST_CANNED_PUBLIC_READ; } else { acl = GSAccessControlList.REST_CANNED_PRIVATE; } try { session.getClient().createBucket(new S3PathContainerService().getContainer(bucket).getName(), location, acl); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot create folder {0}", e, bucket); } } }
@Override public void setConfiguration(final Path file, final LoggingConfiguration configuration) throws BackgroundException { // Logging target bucket final Path bucket = containerService.getContainer(file); try { final S3BucketLoggingStatus status = new S3BucketLoggingStatus( StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : bucket.getName(), null); if(configuration.isEnabled()) { status.setLogfilePrefix(PreferencesFactory.get().getProperty("s3.logging.prefix")); } session.getClient().setBucketLoggingStatus(bucket.getName(), status, true); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, file); } } }
public void create(final Path bucket, final String location) throws BackgroundException { if(!session.configure().getBoolProperty("s3service.disable-dns-buckets", false)) { if(!ServiceUtils.isBucketNameValidDNSName(bucket.getName())) { throw new InteroperabilityException(LocaleFactory.localizedString("Bucket name is not DNS compatible", "S3")); } } AccessControlList acl; if(PreferencesFactory.get().getProperty("s3.bucket.acl.default").equals("public-read")) { acl = AccessControlList.REST_CANNED_PUBLIC_READ; } else { acl = AccessControlList.REST_CANNED_PRIVATE; } try { final String region; if("us-east-1".equals(location)) { region = "US"; } else { region = location; } // Create bucket session.getClient().createBucket(URIEncoder.encode(containerService.getContainer(bucket).getName()), region, acl); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot create folder {0}", e, bucket); } } }
@Override
public void setConfiguration(final Path container, final LoggingConfiguration configuration) throws BackgroundException {
    try {
        // Logging target bucket; defaults to the container itself when no target is set
        final GSBucketLoggingStatus status = new GSBucketLoggingStatus(
            StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : container.getName(), null);
        if(configuration.isEnabled()) {
            status.setLogfilePrefix(PreferencesFactory.get().getProperty("google.logging.prefix"));
        }
        // Grant write for Google to logging target bucket
        final AccessControlList acl = session.getClient().getBucketAcl(container.getName());
        final GroupByEmailAddressGrantee grantee = new GroupByEmailAddressGrantee(
            "cloud-storage-analytics@google.com");
        // Only update the ACL when the analytics group is missing write permission,
        // to avoid a redundant PUT on every configuration change
        if(!acl.getPermissionsForGrantee(grantee).contains(Permission.PERMISSION_WRITE)) {
            acl.grantPermission(grantee, Permission.PERMISSION_WRITE);
            session.getClient().putBucketAcl(container.getName(), acl);
        }
        session.getClient().setBucketLoggingStatusImpl(container.getName(), status);
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e);
    }
}
}
@Override
public void write(final Path file, final Distribution distribution, final LoginCallback prompt) throws BackgroundException {
    final Path container = containerService.getContainer(file);
    try {
        if(distribution.isEnabled()) {
            String suffix = "index.html";
            if(StringUtils.isNotBlank(distribution.getIndexDocument())) {
                suffix = PathNormalizer.name(distribution.getIndexDocument());
            }
            // Enable website endpoint
            session.getClient().setWebsiteConfigImpl(container.getName(), new GSWebsiteConfig(suffix));
            // Configure access logging only when the distribution method supports it
            final DistributionLogging logging = this.getFeature(DistributionLogging.class, distribution.getMethod());
            if(logging != null) {
                // NOTE(review): distribution.isEnabled() is always true in this branch —
                // confirm whether the logging flag was meant to track a separate setting
                new GoogleStorageLoggingFeature(session).setConfiguration(container, new LoggingConfiguration(
                    distribution.isEnabled(), distribution.getLoggingContainer()));
            }
        }
        else {
            // Disable website endpoint
            session.getClient().setWebsiteConfigImpl(container.getName(), new GSWebsiteConfig());
        }
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot write website configuration", e);
    }
}
@Override
public void setConfiguration(final Path file, final LifecycleConfiguration configuration) throws BackgroundException {
    final Path container = containerService.getContainer(file);
    try {
        if(configuration.getTransition() != null || configuration.getExpiration() != null) {
            final LifecycleConfig config = new LifecycleConfig();
            // Unique identifier for the rule. The value cannot be longer than 255 characters.
            // When you specify an empty prefix, the rule applies to all objects in the bucket
            final LifecycleConfig.Rule rule = config.newRule(
                String.format("%s-%s", PreferencesFactory.get().getProperty("application.name"), new AlphanumericRandomStringService().random()), StringUtils.EMPTY, true);
            if(configuration.getTransition() != null) {
                // Days after creation before the object transitions to the archival storage class
                rule.newTransition().setDays(configuration.getTransition());
            }
            if(configuration.getExpiration() != null) {
                // Days after creation before the object is deleted
                rule.newExpiration().setDays(configuration.getExpiration());
            }
            session.getClient().setLifecycleConfig(container.getName(), config);
        }
        else {
            // No rules requested; remove any existing lifecycle configuration entirely
            session.getClient().deleteLifecycleConfig(container.getName());
        }
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, container);
    }
}