@Override
public void setConfiguration(final Path container, final LoggingConfiguration configuration) throws BackgroundException {
    try {
        // Logging target bucket. When no distinct target bucket is configured the
        // container itself receives the delivered access log objects.
        final GSBucketLoggingStatus status = new GSBucketLoggingStatus(
            StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : container.getName(), null);
        if(configuration.isEnabled()) {
            // Key prefix for delivered log objects; only set when enabling logging
            status.setLogfilePrefix(PreferencesFactory.get().getProperty("google.logging.prefix"));
        }
        // Grant write for Google to logging target bucket. Google Cloud Storage delivers
        // access logs through this well-known analytics group.
        // NOTE(review): the grant is applied even when logging is being disabled — confirm intended.
        final AccessControlList acl = session.getClient().getBucketAcl(container.getName());
        final GroupByEmailAddressGrantee grantee = new GroupByEmailAddressGrantee(
            "cloud-storage-analytics@google.com");
        if(!acl.getPermissionsForGrantee(grantee).contains(Permission.PERMISSION_WRITE)) {
            // Only write the ACL back when the grant is not already present
            acl.grantPermission(grantee, Permission.PERMISSION_WRITE);
            session.getClient().putBucketAcl(container.getName(), acl);
        }
        session.getClient().setBucketLoggingStatusImpl(container.getName(), status);
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e);
    }
}
}
@Override public RequestEntityRestStorageService connect(final Proxy proxy, final HostKeyCallback hostkey, final LoginCallback prompt) { final HttpClientBuilder configuration = builder.build(proxy, this, prompt); // Only for AWS if(host.getHostname().endsWith(PreferencesFactory.get().getProperty("s3.hostname.default"))) { configuration.setServiceUnavailableRetryStrategy(new S3TokenExpiredResponseInterceptor(this, prompt)); } return new RequestEntityRestStorageService(this, this.configure(), configuration); }
/**
 * Forces a full reclaim of all caches, and waits until the reclaim completes. Cache contents that need to be retained because they are a part of an active job are retained.
 * Any cache contents that can be reclaimed will be. This operation may take a very long time to complete, depending on how much of the cache can be reclaimed and how many blobs the cache is managing.
 */
protected void clear() throws BackgroundException {
    try {
        // Cancel all active jobs to remove references to cached objects
        final Ds3Client ds3Client = new SpectraClientBuilder().wrap(session.getClient(), session.getHost());
        ds3Client.cancelAllActiveJobsSpectraS3(new CancelAllActiveJobsSpectraS3Request());
        // Clear cache through the Spectra REST management endpoint
        final RequestEntityRestStorageService client = session.getClient();
        final HttpPut request = new HttpPut(String.format("%s://%s:%s/_rest_/cache_filesystem?reclaim",
            session.getHost().getProtocol().getScheme(), session.getHost().getHostname(), session.getHost().getPort()));
        // Sign the raw request with the session credentials before executing it directly
        client.authorizeHttpRequest(request, null, null);
        final HttpResponse response = client.getHttpClient().execute(request);
        // The appliance acknowledges a successful reclaim with 204 No Content
        if(HttpStatus.SC_NO_CONTENT != response.getStatusLine().getStatusCode()) {
            throw new HttpResponseException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase());
        }
    }
    catch(HttpResponseException e) {
        throw new HttpResponseExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map(e);
    }
}
}
// Delete the bucket, then evict its cached region mapping so a subsequent
// bucket of the same name does not resolve to a stale regional endpoint
session.getClient().deleteBucket(bucket);
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
// NOTE(review): fragment — enclosing method and closing braces are outside this view
try {
    // Remove the specific object version rather than creating a delete marker
    session.getClient().deleteVersionedObject(
        file.attributes().getVersionId(),
        containerService.getContainer(file).getName(), containerService.getKey(file));
    try {
        final String bucket = containerService.getContainer(file).getName();
        session.getClient().deleteBucket(bucket);
        // Evict the cached region for the deleted bucket name
        session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
// NOTE(review): two alternative return statements from branches of the surrounding
// (not visible) method — container ACL vs. ACL of a specific object version
return this.convert(session.getClient().getBucketAcl(containerService.getContainer(file).getName()));
return this.convert(session.getClient().getVersionedObjectAcl(file.attributes().getVersionId(),
    containerService.getContainer(file).getName(), containerService.getKey(file)));
// NOTE(review): fragments from alternative branches of the surrounding (not visible)
// method — write the ACL on the container itself vs. on an individual object key
session.getClient().putBucketAcl(container.getName(), this.convert(acl));
session.getClient().putObjectAcl(container.getName(), containerService.getKey(file), this.convert(acl));
public void create(final Path bucket, final String location) throws BackgroundException { // Create bucket if(!ServiceUtils.isBucketNameValidDNSName(bucket.getName())) { throw new InteroperabilityException(LocaleFactory.localizedString("Bucket name is not DNS compatible", "S3")); } AccessControlList acl; if(PreferencesFactory.get().getProperty("s3.bucket.acl.default").equals("public-read")) { acl = GSAccessControlList.REST_CANNED_PUBLIC_READ; } else { acl = GSAccessControlList.REST_CANNED_PRIVATE; } try { session.getClient().createBucket(new S3PathContainerService().getContainer(bucket).getName(), location, acl); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot create folder {0}", e, bucket); } } }
// NOTE(review): fragment — condition head, trailing arguments and closing braces are
// outside this view. Two fused alternatives: MFA-protected delete vs. plain batch delete.
&& versioningService.getConfiguration(container).isMultifactor()) {
    // Multi-factor delete enabled on the bucket: prompt for the one-time token
    final Credentials factor = versioningService.getToken(StringUtils.EMPTY, prompt);
    final MultipleDeleteResult result = session.getClient().deleteMultipleObjectsWithMFA(container.getName(),
        keys.toArray(new ObjectKeyAndVersion[keys.size()]),
        factor.getUsername(),
// Alternative branch: plain multi-object delete of one partition of keys
final MultipleDeleteResult result = session.getClient().deleteMultipleObjects(container.getName(),
    partition.toArray(new ObjectKeyAndVersion[partition.size()]),
@Override public void setConfiguration(final Path file, final LifecycleConfiguration configuration) throws BackgroundException { final Path container = containerService.getContainer(file); try { if(configuration.getTransition() != null || configuration.getExpiration() != null) { final LifecycleConfig config = new LifecycleConfig(); // Unique identifier for the rule. The value cannot be longer than 255 characters. When you specify an empty prefix, the rule applies to all objects in the bucket final LifecycleConfig.Rule rule = config.newRule( String.format("%s-%s", PreferencesFactory.get().getProperty("application.name"), new AlphanumericRandomStringService().random()), StringUtils.EMPTY, true); if(configuration.getTransition() != null) { rule.newTransition().setDays(configuration.getTransition()); } if(configuration.getExpiration() != null) { rule.newExpiration().setDays(configuration.getExpiration()); } session.getClient().setLifecycleConfig(container.getName(), config); } else { session.getClient().deleteLifecycleConfig(container.getName()); } } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Failure to write attributes of {0}", e, container); } }
// NOTE(review): fragment — enclosing method and closing braces are outside this view
// Server-side copy of the specific version within the same container
session.getClient().copyVersionedObject(file.attributes().getVersionId(),
    containerService.getContainer(file).getName(), containerService.getKey(file),
    containerService.getContainer(file).getName(), destination, false);
if(file.getParent().attributes().getCustom().containsKey(S3VersionedObjectListService.KEY_DELETE_MARKER)) {
    // Parent placeholder is shadowed by a delete marker; remove that marker version.
    // NOTE(review): uses the parent's version id and key — confirm against caller.
    session.getClient().deleteVersionedObject(
        file.getParent().attributes().getVersionId(),
        containerService.getContainer(file).getName(), containerService.getKey(file.getParent()));
@Override
protected StorageBucket createBucketImpl(String bucketName, String location, AccessControlList acl) throws ServiceException {
    // Google Cloud Storage requires the owning project in the x-goog-project-id header;
    // the project id is carried in the credentials username for this session
    return super.createBucketImpl(bucketName, location, acl,
        Collections.singletonMap("x-goog-project-id", host.getCredentials().getUsername()));
}
protected String copy(final Path source, final S3Object destination, final TransferStatus status) throws BackgroundException { try { // Copying object applying the metadata of the original final Map<String, Object> stringObjectMap = session.getClient().copyVersionedObject(source.attributes().getVersionId(), containerService.getContainer(source).getName(), containerService.getKey(source), destination.getBucketName(), destination, false); final Map complete = (Map) stringObjectMap.get(Constants.KEY_FOR_COMPLETE_METADATA); return (String) complete.get(Constants.AMZ_VERSION_ID); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source); } }
public void create(final Path bucket, final String location) throws BackgroundException { if(!session.configure().getBoolProperty("s3service.disable-dns-buckets", false)) { if(!ServiceUtils.isBucketNameValidDNSName(bucket.getName())) { throw new InteroperabilityException(LocaleFactory.localizedString("Bucket name is not DNS compatible", "S3")); } } AccessControlList acl; if(PreferencesFactory.get().getProperty("s3.bucket.acl.default").equals("public-read")) { acl = AccessControlList.REST_CANNED_PUBLIC_READ; } else { acl = AccessControlList.REST_CANNED_PRIVATE; } try { final String region; if("us-east-1".equals(location)) { region = "US"; } else { region = location; } // Create bucket session.getClient().createBucket(URIEncoder.encode(containerService.getContainer(bucket).getName()), region, acl); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot create folder {0}", e, bucket); } } }