/**
 * Compute the overall ETag for a multipart object.
 * <p>
 * The value of the ETag header is calculated by taking the ETag value of each segment,
 * concatenating them together, and then returning the MD5 checksum of the result.
 *
 * @param checksum Checksum compute service
 * @param objects  Files
 * @return Concatenated checksum
 * @throws ChecksumException Failure computing the digest
 */
public Checksum checksum(final ChecksumCompute checksum, final List<StorageObject> objects) throws ChecksumException {
    final StringBuilder concatenated = new StringBuilder();
    for(StorageObject s : objects) {
        concatenated.append(s.getMd5sum());
    }
    // Use an explicit charset rather than Charset.defaultCharset(): the concatenated
    // hex digests are ASCII, but the default charset makes the byte representation
    // platform-dependent in principle.
    return checksum.compute(IOUtils.toInputStream(concatenated.toString(), Charset.forName("UTF-8")), new TransferStatus());
}
}
/**
 * The fingerprint of a public key consists of the output of the MD5
 * message-digest algorithm [RFC-1321]. The input to the algorithm is
 * the public key blob as described in [SSH-TRANS]. The output of the
 * algorithm is presented to the user as a sequence of 16 octets printed
 * as hexadecimal with lowercase letters and separated by colons.
 * <p>
 * For example: "4b:69:6c:72:6f:79:20:77:61:73:20:68:65:72:65:21"
 *
 * @param in Public key blob
 * @return The fingerprint is the MD5 of the Base64-encoded public key
 */
public String fingerprint(final InputStream in) throws ChecksumException {
    // Raw hex digest without any delimiters
    final String hex = ChecksumComputeFactory.get(HashAlgorithm.md5).compute(in, new TransferStatus()).hash;
    // Start with the first octet, then append the remaining octets colon-separated
    final StringBuilder formatted = new StringBuilder(hex.substring(0, 2));
    for(int index = 2; index + 2 <= hex.length(); index += 2) {
        formatted.append(":").append(hex.substring(index, index + 2));
    }
    return formatted.toString();
}
}
@Override
public void cleanup(final Transfer download) {
    // Save checksum before edit
    try {
        // MD5 of the downloaded local file; kept so later modifications can be detected
        checksum = ChecksumComputeFactory.get(HashAlgorithm.md5).compute(local.getInputStream(), new TransferStatus());
    }
    catch(BackgroundException e) {
        // Best effort only — a failed checksum computation must not abort the cleanup
        log.warn(String.format("Error computing checksum for %s. %s", local, e.getDetail()));
    }
}
};
// Notify the listener before the potentially slow hash computation starts
listener.message(MessageFormat.format(
    LocaleFactory.localizedString("Compute MD5 hash of {0}", "Status"),
    local.getName()));
// MD5 of the local file content, read from the start of the stream
current = ChecksumComputeFactory.get(HashAlgorithm.md5).compute(local.getInputStream(), new TransferStatus());
// Only attempt the comparison when the server reported a checksum for the existing upload
if(Checksum.NONE != append.checksum) {
    final ChecksumCompute compute = ChecksumComputeFactory.get(append.checksum.algorithm);
    // Recompute with the same algorithm over the local file; equal checksums mean
    // the remote copy already matches and the transfer can be skipped
    if(compute.compute(local.getInputStream(), parent).equals(append.checksum)) {
        if(log.isInfoEnabled()) {
            log.info(String.format("Skip file %s with checksum %s", file, local.attributes().getChecksum()));
@Override public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { if(Checksum.NONE == status.getChecksum()) { // The client-side checksum is passed to the BlackPearl gateway by supplying the applicable CRC HTTP header. // If this is done, the BlackPearl gateway verifies that the data received matches the checksum provided. // End-to-end data protection requires that the client provide the CRC when uploading the object and then // verify the CRC after downloading the object at a later time (see Get Object). The BlackPearl gateway also // verifies the CRC when reading from physical data stores so the gateway can identify problems before // transmitting data to the client. status.setChecksum(writer.checksum(file).compute(local.getInputStream(), status)); } // Make sure file is available in cache final List<TransferStatus> chunks = bulk.query(Transfer.Type.upload, file, status); StorageObject stored = null; for(TransferStatus chunk : chunks) { chunk.setChecksum(ChecksumComputeFactory.get(HashAlgorithm.md5).compute(local.getInputStream(), chunk)); stored = super.upload(file, local, throttle, listener, chunk, callback); } return stored; }
@Override
public B2UploadPartResponse call() throws BackgroundException {
    final TransferStatus status = new TransferStatus().length(len);
    // Entity and checksum are computed over the same slice of the shared buffer
    final ByteArrayEntity entity = new ByteArrayEntity(content, off, len);
    // B2 requires a SHA-1 for every large-file part
    final Checksum checksum = ChecksumComputeFactory.get(HashAlgorithm.sha1)
        .compute(new ByteArrayInputStream(content, off, len), status);
    try {
        return session.getClient().uploadLargeFilePart(version.id, segment, entity, checksum.hash);
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
}, new DisabledProgressListener(), new TransferBackgroundActionState(overall)).call());
/**
 * Write the given content to the remote file in a single request, setting
 * length and checksum on the transfer status beforehand.
 *
 * @param file    Remote target
 * @param content Payload to upload
 * @param status  Transfer status updated with length and checksum
 * @throws BackgroundException Failure writing to the server
 */
public void write(final Path file, final byte[] content, final TransferStatus status) throws BackgroundException {
    final Write<?> feature = session._getFeature(Write.class);
    status.setLength(content.length);
    // Checksum is computed over the exact bytes that will be sent
    status.setChecksum(feature.checksum(file).compute(new ByteArrayInputStream(content), status));
    final StatusOutputStream<?> stream = feature.write(file, status, new DisabledConnectionCallback());
    try {
        IOUtils.write(content, stream);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
    finally {
        // Always release the stream, even when the write failed
        new DefaultStreamCloser().close(stream);
    }
}
}
// Only attempt the comparison when a checksum is known for the remote file
if(Checksum.NONE != attributes.getChecksum()) {
    final ChecksumCompute compute = ChecksumComputeFactory.get(attributes.getChecksum().algorithm);
    // Recompute with the same algorithm over the local file; equal checksums mean
    // the remote copy already matches and the transfer can be skipped
    if(compute.compute(local.getInputStream(), parent).equals(attributes.getChecksum())) {
        if(log.isInfoEnabled()) {
            log.info(String.format("Skip file %s with checksum %s", file, local.attributes().getChecksum()));
case AWS4HMACSHA256:
    // AWS Signature Version 4 signing requires the SHA-256 of the request payload,
    // computed over the same buffer slice that will be sent
    status.setChecksum(ChecksumComputeFactory.get(HashAlgorithm.sha256)
        .compute(new ByteArrayInputStream(content, off, len), status)
    );
    break;
/**
 * Create a folder. For anything below the container level a checksum for the
 * zero-byte placeholder is computed first when none is set yet.
 */
@Override
public Path mkdir(final Path folder, final String region, final TransferStatus status) throws BackgroundException {
    // Containers carry no placeholder object, hence no checksum is required
    if(!containerService.isContainer(folder)) {
        if(Checksum.NONE == status.getChecksum()) {
            status.setChecksum(writer.checksum(folder).compute(new NullInputStream(0L), status));
        }
    }
    return super.mkdir(folder, region, status);
}
}
/**
 * Create an empty file and return its path with the attributes reported by the server.
 */
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
    if(Checksum.NONE == status.getChecksum()) {
        // Even a zero-byte upload needs a checksum; compute it over an empty stream
        status.setChecksum(writer.checksum(file).compute(new NullInputStream(0L), status));
    }
    status.setTimestamp(System.currentTimeMillis());
    final StatusOutputStream<BaseB2Response> stream = writer.write(file, status, new DisabledConnectionCallback());
    new DefaultStreamCloser().close(stream);
    final B2FileResponse response = (B2FileResponse) stream.getStatus();
    return new Path(file.getParent(), file.getName(), file.getType(),
        new B2AttributesFinderFeature(session, fileid).toAttributes(response));
}
@Override public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final S3Protocol.AuthenticationHeaderSignatureVersion signatureVersion = session.getSignatureVersion(); switch(signatureVersion) { case AWS4HMACSHA256: if(Checksum.NONE == status.getChecksum()) { status.setChecksum(writer.checksum(file).compute(local.getInputStream(), status)); } break; } try { return super.upload(file, local, throttle, listener, status, callback); } catch(InteroperabilityException e) { if(!session.getSignatureVersion().equals(signatureVersion)) { // Retry if upload fails with Header "x-amz-content-sha256" set to the hex-encoded SHA256 hash of the // request payload is required for AWS Version 4 request signing return this.upload(file, local, throttle, listener, status, callback); } throw e; } }
/**
 * Create an empty file and return its path with freshly read attributes.
 */
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
    if(Checksum.NONE == status.getChecksum()) {
        // A checksum is required even for the zero-byte body
        status.setChecksum(writer.checksum(file).compute(new NullInputStream(0L), status));
    }
    // Open and immediately close the stream; this creates the empty remote file
    final DefaultStreamCloser closer = new DefaultStreamCloser();
    closer.close(writer.write(file, status, new DisabledConnectionCallback()));
    return new Path(file.getParent(), file.getName(), file.getType(),
        new AzureAttributesFinderFeature(session, context).find(file));
}
@Override public Path mkdir(final Path folder, final String region, final TransferStatus status) throws BackgroundException { if(containerService.isContainer(folder)) { final S3BucketCreateService service = new S3BucketCreateService(session); service.create(folder, StringUtils.isBlank(region) ? PreferencesFactory.get().getProperty("s3.location") : region); return folder; } else { if(Checksum.NONE == status.getChecksum()) { status.setChecksum(writer.checksum(folder).compute(new NullInputStream(0L), status)); } // Add placeholder object status.setMime(MIMETYPE); final EnumSet<AbstractPath.Type> type = EnumSet.copyOf(folder.getType()); type.add(Path.Type.placeholder); final StatusOutputStream<StorageObject> out = writer.write(new Path(folder.getParent(), folder.getName(), type, new PathAttributes(folder.attributes())), status, new DisabledConnectionCallback()); new DefaultStreamCloser().close(out); final StorageObject metadata = out.getStatus(); return new Path(folder.getParent(), folder.getName(), type, new S3AttributesFinderFeature(session).toAttributes(metadata)); } }
/**
 * Create an empty object and return its path with the attributes of the stored object.
 */
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
    if(Checksum.NONE == status.getChecksum()) {
        // Checksum of the empty body
        status.setChecksum(writer.checksum(file).compute(new NullInputStream(0L), status));
    }
    status.setLength(0L);
    final StatusOutputStream<StorageObject> stream = writer.write(file, status, new DisabledConnectionCallback());
    new DefaultStreamCloser().close(stream);
    final S3Object object = (S3Object) stream.getStatus();
    return new Path(file.getParent(), file.getName(), file.getType(),
        new S3AttributesFinderFeature(session).toAttributes(object));
}
@Override
public StorageObject call() throws BackgroundException {
    // Abort promptly when the overall transfer was cancelled
    if(overall.isCanceled()) {
        throw new ConnectionCanceledException();
    }
    // Per-segment status: upload `length` bytes starting at `offset` of the local file
    final TransferStatus status = new TransferStatus()
        .length(length)
        .skip(offset);
    // Propagate header and nonce state from the overall status so all segments share it
    // NOTE(review): header/nonces appear to be vault encryption state — confirm
    status.setHeader(overall.getHeader());
    status.setNonces(overall.getNonces());
    status.setChecksum(writer.checksum(segment).compute(local.getInputStream(), status));
    status.setSegment(true);
    return SwiftLargeObjectUploadFeature.super.upload(
        segment, local, throttle, listener, status, overall, new StreamProgress() {
            @Override
            public void progress(final long bytes) {
                status.progress(bytes);
                // Discard sent bytes in overall progress if there is an error reply for segment.
                overall.progress(bytes);
            }

            @Override
            public void setComplete() {
                status.setComplete();
            }
        }, callback);
}
}, overall));
@Override
public B2UploadPartResponse call() throws BackgroundException {
    // Abort promptly when the overall transfer was cancelled
    if(overall.isCanceled()) {
        throw new ConnectionCanceledException();
    }
    // Per-part status: upload `length` bytes starting at `offset` of the local file
    final TransferStatus status = new TransferStatus()
        .length(length)
        .skip(offset);
    // Propagate header and nonce state from the overall status so all parts share it
    // NOTE(review): header/nonces appear to be vault encryption state — confirm
    status.setHeader(overall.getHeader());
    status.setNonces(overall.getNonces());
    status.setChecksum(writer.checksum(file).compute(local.getInputStream(), status));
    status.setSegment(true);
    status.setPart(partNumber);
    return (B2UploadPartResponse) B2LargeUploadService.super.upload(file, local, throttle, listener, status, overall, new StreamProgress() {
        @Override
        public void progress(final long bytes) {
            status.progress(bytes);
            // Discard sent bytes in overall progress if there is an error reply for segment.
            overall.progress(bytes);
        }

        @Override
        public void setComplete() {
            status.setComplete();
        }
    }, callback);
}
}, overall));
@Override
public Checksum compute(final InputStream in, final TransferStatus status) throws ChecksumException {
    // If the delegate produces no checksum even for empty input, there is nothing to compute here
    if(Checksum.NONE == delegate.compute(new NullInputStream(0L), new TransferStatus())) {
        return Checksum.NONE;
    }
    if(null == status.getHeader()) {
        // Write header to be reused in writer
        final Cryptor cryptor = cryptomator.getCryptor();
        final FileHeader header = cryptor.fileHeaderCryptor().create();
        status.setHeader(cryptor.fileHeaderCryptor().encryptHeader(header));
    }
    // Make nonces reusable in case we need to compute a checksum
    status.setNonces(new RotatingNonceGenerator(cryptomator.numberOfChunks(status.getLength())));
    // NOTE(review): normalize presumably wraps the plaintext stream so the checksum matches
    // the ciphertext that will actually be written — confirm against the writer implementation
    return this.compute(this.normalize(in, status), status.getOffset(), status.getHeader(), status.getNonces());
}
@Override public Path mkdir(final Path folder, final String region, final TransferStatus status) throws BackgroundException { try { final BlobRequestOptions options = new BlobRequestOptions(); if(containerService.isContainer(folder)) { // Container name must be lower case. final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(folder).getName()); container.create(options, context); return new Path(folder.getParent(), folder.getName(), folder.getType(), new AzureAttributesFinderFeature(session, context).find(folder)); } else { if(Checksum.NONE == status.getChecksum()) { status.setChecksum(writer.checksum(folder).compute(new NullInputStream(0L), status)); } final EnumSet<AbstractPath.Type> type = EnumSet.copyOf(folder.getType()); type.add(Path.Type.placeholder); final Path placeholder = new Path(folder.getParent(), folder.getName(), type, new PathAttributes(folder.attributes())); new DefaultStreamCloser().close(writer.write(placeholder, status, new DisabledConnectionCallback())); return new Path(placeholder.getParent(), placeholder.getName(), placeholder.getType(), new AzureAttributesFinderFeature(session, context).find(placeholder)); } } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } catch(StorageException e) { throw new AzureExceptionMappingService().map("Cannot create folder {0}", e, folder); } }