MultipartUploadSlicingAlgorithm

How to use MultipartUploadSlicingAlgorithm in org.jclouds.blobstore.strategy.internal

Best Java code snippets using org.jclouds.blobstore.strategy.internal.MultipartUploadSlicingAlgorithm (Showing top 17 results out of 315)
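
Before the individual results, here is a minimal, self-contained sketch of the typical flow: construct the algorithm with the provider's multipart limits, let it calculate a chunk size for the blob's content length, then read back the planned number of full parts and the trailing remainder. The concrete limit values and the class name SlicingPlanExample below are illustrative assumptions, not jclouds defaults; real code obtains the limits from the BlobStore, as the snippets that follow do.

import org.jclouds.blobstore.strategy.internal.MultipartUploadSlicingAlgorithm;

public class SlicingPlanExample {
 public static void main(String[] args) {
  // Assumed, S3-like provider limits, for illustration only; production code reads
  // them from BlobStore#getMinimumMultipartPartSize() and related getters.
  long minPartSize = 5L * 1024 * 1024;        // 5 MiB
  long maxPartSize = 5L * 1024 * 1024 * 1024; // 5 GiB
  int maxNumberOfParts = 10000;

  MultipartUploadSlicingAlgorithm algorithm =
     new MultipartUploadSlicingAlgorithm(minPartSize, maxPartSize, maxNumberOfParts);

  long contentLength = 3L * 1024 * 1024 * 1024; // size of the blob to upload
  long partSize = algorithm.calculateChunkSize(contentLength);

  // After calculateChunkSize(), the instance describes the upload plan:
  // getParts() full parts of partSize bytes plus getRemaining() trailing bytes.
  System.out.printf("partSize=%d parts=%d remaining=%d%n",
     partSize, algorithm.getParts(), algorithm.getRemaining());
 }
}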

origin: org.apache.jclouds/jclouds-blobstore

protected long getNextChunkOffset() {
 long next = chunkOffset;
 chunkOffset += getChunkSize();
 return next;
}
origin: apache/jclouds

/**
* Phase 1 of the algorithm.
* The chunk size does not grow beyond {@code MultipartUploadStrategy.DEFAULT_PART_SIZE}
* until we reach {@code MultipartUploadSlicingAlgorithm.MAGNITUDE_BASE} parts.
*/
@Test
public void testWhenChunkSizeHasToStartGrowing() {
 MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm(
    MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS);
 // upper limit while we still have exactly defaultPartSize chunkSize
 long length = MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE;
 long chunkSize = strategy.calculateChunkSize(length);
 assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
 assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE - 1);
 assertEquals(strategy.getRemaining(), MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
 assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
 // then chunkSize is increasing
 length += 1;
 chunkSize = strategy.calculateChunkSize(length);
 assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE * 2);
 assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE / 2);
 assertEquals(strategy.getRemaining(), 1);
 assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
}
origin: org.apache.jclouds.api/openstack-swift

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides, ListeningExecutorService executor) {
 ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();
 long contentLength = checkNotNull(blob.getMetadata().getContentMetadata().getContentLength(),
    "must provide content-length to use multi-part upload");
 MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
    getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
 long partSize = algorithm.calculateChunkSize(contentLength);
 MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), partSize, overrides);
 int partNumber = 0;
 for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
   BlobUploader b =
      new BlobUploader(mpu, partNumber++, payload);
   parts.add(executor.submit(b));
 }
 return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
}
origin: org.apache.jclouds/jclouds-blobstore

long contentLength = blob.getMetadata().getContentMetadata().getContentLength();
MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
   getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
long partSize = algorithm.calculateChunkSize(contentLength);
int partNumber = 1;
while (partNumber <= algorithm.getParts()) {
 Payload payload = slicer.slice(blob.getPayload(), algorithm.getCopied(), partSize);
 BlobUploader b =
    new BlobUploader(mpu, partNumber++, payload);
 parts.add(executor.submit(b));
 algorithm.addCopied(partSize);
}
if (algorithm.getRemaining() != 0) {
 Payload payload = slicer.slice(blob.getPayload(), algorithm.getCopied(), algorithm.getRemaining());
 BlobUploader b =
    new BlobUploader(mpu, partNumber, payload);
 parts.add(executor.submit(b));
}
origin: apache/jclouds

/**
* Phase 2 of the algorithm.
* The number of parts does not grow beyond {@code MultipartUploadSlicingAlgorithm.MAGNITUDE_BASE}
* until the chunk size reaches {@code MultipartUploadStrategy.MAX_PART_SIZE}.
*/
@Test
public void testWhenPartsHasToStartGrowingFromMagnitudeBase() {
 MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm(
    MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS);
 // upper limit while we still have exactly MAGNITUDE_BASE parts (together with the remaining)
 long length = MAX_PART_SIZE * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE;
 long chunkSize = strategy.calculateChunkSize(length);
 assertEquals(chunkSize, MAX_PART_SIZE);
 assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE - 1);
 assertEquals(strategy.getRemaining(), MAX_PART_SIZE);
 assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
 // then the number of parts is increasing
 length += 1;
 chunkSize = strategy.calculateChunkSize(length);
 assertEquals(chunkSize, MAX_PART_SIZE);
 assertEquals(strategy.getParts(), MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE);
 assertEquals(strategy.getRemaining(), 1);
 assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
}
origin: Nextdoor/bender

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides, ListeningExecutorService executor) {
 ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();
 long contentLength = checkNotNull(blob.getMetadata().getContentMetadata().getContentLength(),
    "must provide content-length to use multi-part upload");
 MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
    getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
 long partSize = algorithm.calculateChunkSize(contentLength);
 MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), partSize, overrides);
 int partNumber = 0;
 for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
   BlobUploader b =
      new BlobUploader(mpu, partNumber++, payload);
   parts.add(executor.submit(b));
 }
 return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
}
origin: apache/jclouds

long contentLength = blob.getMetadata().getContentMetadata().getContentLength();
MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
   getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
long partSize = algorithm.calculateChunkSize(contentLength);
int partNumber = 1;
while (partNumber <= algorithm.getParts()) {
 Payload slice = slicer.slice(payload, algorithm.getCopied(), partSize);
 BlobUploader b = new BlobUploader(mpu, partNumber++, slice);
 parts.add(repeatable ? executor.submit(b) : Futures.immediateFuture(b.call()));
 algorithm.addCopied(partSize);
}
if (algorithm.getRemaining() != 0) {
 Payload slice = slicer.slice(payload, algorithm.getCopied(), algorithm.getRemaining());
 BlobUploader b = new BlobUploader(mpu, partNumber, slice);
 parts.add(repeatable ? executor.submit(b) : Futures.immediateFuture(b.call()));
}
origin: apache/jclouds

  /**
  * Phase 3 of the algorithm.
   * The number of parts increases up to {@code MAX_NUMBER_OF_PARTS}
   * while the part size does not exceed {@code MultipartUploadStrategy.MAX_PART_SIZE}.
  */
  @Test
  public void testWhenPartsExceedsMaxNumberOfParts() {
   MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm(
      MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS);
   // upper limit while we still have exactly MAX_NUMBER_OF_PARTS parts (together with the remaining)
   long length = MAX_PART_SIZE * MAX_NUMBER_OF_PARTS;
   long chunkSize = strategy.calculateChunkSize(length);
   assertEquals(chunkSize, MAX_PART_SIZE);
   assertEquals(strategy.getParts(), MAX_NUMBER_OF_PARTS - 1);
   assertEquals(strategy.getRemaining(), MAX_PART_SIZE);
   assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);

   // then the number of parts is increasing
   length += 1;
   chunkSize = strategy.calculateChunkSize(length);
   assertEquals(chunkSize, MAX_PART_SIZE);
   assertEquals(strategy.getParts(), MAX_NUMBER_OF_PARTS);
   assertEquals(strategy.getRemaining(), 1);
   assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
  }
}
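
Taken together, the three phase tests above trace how the plan changes as the content length crosses each boundary. The fragment below is a hedged sketch in the style of the same test class: it assumes it lives alongside those tests so that MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS and the DEFAULT_PART_SIZE / DEFAULT_MAGNITUDE_BASE fields referenced above are visible, and it prints the plan at each boundary instead of asserting on it.

// Hedged sketch; assumes the surrounding test class makes MIN_PART_SIZE, MAX_PART_SIZE,
// MAX_NUMBER_OF_PARTS and the DEFAULT_* fields visible, as the tests above do.
MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm(
   MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS);
// Largest length of each phase, taken from the tests above.
long phase1 = MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE
   * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE;  // chunk size still DEFAULT_PART_SIZE
long phase2 = MAX_PART_SIZE * MultipartUploadSlicingAlgorithm.DEFAULT_MAGNITUDE_BASE; // chunk size has reached MAX_PART_SIZE
long phase3 = MAX_PART_SIZE * MAX_NUMBER_OF_PARTS;            // part count about to hit MAX_NUMBER_OF_PARTS
for (long length : new long[] { phase1, phase1 + 1, phase2, phase2 + 1, phase3, phase3 + 1 }) {
 long chunkSize = strategy.calculateChunkSize(length);
 System.out.printf("length=%d -> chunkSize=%d parts=%d remaining=%d%n",
    length, chunkSize, strategy.getParts(), strategy.getRemaining());
}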
origin: apache/jclouds

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides, ListeningExecutorService executor) {
 ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();
 long contentLength = checkNotNull(blob.getMetadata().getContentMetadata().getContentLength(),
    "must provide content-length to use multi-part upload");
 MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
    getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
 long partSize = algorithm.calculateChunkSize(contentLength);
 MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), partSize, overrides);
 int partNumber = 0;
 for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
   BlobUploader b =
      new BlobUploader(mpu, partNumber++, payload);
   parts.add(executor.submit(b));
 }
 return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
}
origin: Nextdoor/bender

protected long getNextChunkOffset() {
 long next = chunkOffset;
 chunkOffset += getChunkSize();
 return next;
}
origin: apache/jclouds

MultipartUploadSlicingAlgorithm strategy = new MultipartUploadSlicingAlgorithm(
   MIN_PART_SIZE, MAX_PART_SIZE, MAX_NUMBER_OF_PARTS);
long chunkSize = strategy.calculateChunkSize(length);
assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
assertEquals(strategy.getParts(), 0);
assertEquals(strategy.getRemaining(), length);
assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
chunkSize = strategy.calculateChunkSize(length);
assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
assertEquals(strategy.getParts(), 0);
assertEquals(strategy.getRemaining(), length);
assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
chunkSize = strategy.calculateChunkSize(length);
assertEquals(chunkSize, MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE);
assertEquals(strategy.getParts(), 1);
assertEquals(strategy.getRemaining(), 1);
assertEquals(chunkSize * strategy.getParts() + strategy.getRemaining(), length);
origin: com.amysta.jclouds/jclouds-blobstore

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides, ListeningExecutorService executor) {
 ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();
 MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), overrides);
 try {
   long contentLength = blob.getMetadata().getContentMetadata().getContentLength();
   MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
      getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
   long partSize = algorithm.calculateChunkSize(contentLength);
   int partNumber = 1;
   for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
    BlobUploader b =
       new BlobUploader(mpu, partNumber++, payload);
    parts.add(executor.submit(b));
   }
   return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
 } catch (RuntimeException re) {
   abortMultipartUpload(mpu);
   throw re;
 }
}
origin: apache/jclouds

protected long getNextChunkOffset() {
 long next = chunkOffset;
 chunkOffset += getChunkSize();
 return next;
}
origin: Nextdoor/bender

@Beta
protected String putMultipartBlob(String container, Blob blob, PutOptions overrides, ListeningExecutorService executor) {
 ArrayList<ListenableFuture<MultipartPart>> parts = new ArrayList<ListenableFuture<MultipartPart>>();
 MultipartUpload mpu = initiateMultipartUpload(container, blob.getMetadata(), overrides);
 try {
   long contentLength = blob.getMetadata().getContentMetadata().getContentLength();
   MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
      getMinimumMultipartPartSize(), getMaximumMultipartPartSize(), getMaximumNumberOfParts());
   long partSize = algorithm.calculateChunkSize(contentLength);
   int partNumber = 1;
   for (Payload payload : slicer.slice(blob.getPayload(), partSize)) {
    BlobUploader b =
       new BlobUploader(mpu, partNumber++, payload);
    parts.add(executor.submit(b));
   }
   return completeMultipartUpload(mpu, Futures.getUnchecked(Futures.allAsList(parts)));
 } catch (RuntimeException re) {
   abortMultipartUpload(mpu);
   throw re;
 }
}
origin: com.amysta.jclouds/jclouds-blobstore

protected long getNextChunkOffset() {
 long next = chunkOffset;
 chunkOffset += getChunkSize();
 return next;
}
origin: apache/jclouds

@Test(groups = { "integration", "live" })
public void testPutMultipartByteSource() throws Exception {
 long length = Math.max(getMinimumMultipartBlobSize(), MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE + 1);
 BlobStore blobStore = view.getBlobStore();
 MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
     blobStore.getMinimumMultipartPartSize(), blobStore.getMaximumMultipartPartSize(),
     blobStore.getMaximumNumberOfParts());
 // make sure that we are creating multiple parts
 assertThat(algorithm.calculateChunkSize(length)).isLessThan(length);
 ByteSource byteSource = TestUtils.randomByteSource().slice(0, length);
 Payload payload = new ByteSourcePayload(byteSource);
 HashCode hashCode = byteSource.hash(Hashing.md5());
 testPut(payload, hashCode, payload, length, new PutOptions().multipart(true));
}
origin: apache/jclouds

@Test(groups = { "integration", "live" })
public void testPutMultipartInputStream() throws Exception {
 long length = Math.max(getMinimumMultipartBlobSize(), MultipartUploadSlicingAlgorithm.DEFAULT_PART_SIZE + 1);
 BlobStore blobStore = view.getBlobStore();
 MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
     blobStore.getMinimumMultipartPartSize(), blobStore.getMaximumMultipartPartSize(),
     blobStore.getMaximumNumberOfParts());
 // make sure that we are creating multiple parts
 assertThat(algorithm.calculateChunkSize(length)).isLessThan(length);
 ByteSource byteSource = TestUtils.randomByteSource().slice(0, length);
 Payload payload = new InputStreamPayload(byteSource.openStream());
 testPut(payload, null, new ByteSourcePayload(byteSource), length, new PutOptions().multipart(true));
}
org.jclouds.blobstore.strategy.internal.MultipartUploadSlicingAlgorithm

Most used methods

  • <init>
  • calculateChunkSize
  • getChunkSize
  • getParts
  • getRemaining
  • addCopied
  • getCopied
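
The while-loops in the BaseBlobStore snippets above combine most of these methods into one slicing pass. The sketch below is a hedged, simplified illustration of that bookkeeping rather than jclouds code: uploadPart and the part-size limits are assumptions standing in for the real PayloadSlicer / BlobUploader machinery.

import org.jclouds.blobstore.strategy.internal.MultipartUploadSlicingAlgorithm;

public class SlicingLoopSketch {
 // Hypothetical stand-in for whatever actually copies a slice (e.g. PayloadSlicer + an uploader).
 static void uploadPart(long offset, long size) {
  System.out.printf("would upload %d bytes starting at offset %d%n", size, offset);
 }

 public static void main(String[] args) {
  // Assumed, S3-like limits, for illustration only.
  MultipartUploadSlicingAlgorithm algorithm = new MultipartUploadSlicingAlgorithm(
     5L * 1024 * 1024, 5L * 1024 * 1024 * 1024, 10000);
  long contentLength = 200L * 1024 * 1024;
  long partSize = algorithm.calculateChunkSize(contentLength); // also exposed later via getChunkSize()
  for (int partNumber = 1; partNumber <= algorithm.getParts(); partNumber++) {
   uploadPart(algorithm.getCopied(), partSize); // getCopied() tracks the running byte offset
   algorithm.addCopied(partSize);               // advance the offset by one full part
  }
  if (algorithm.getRemaining() != 0) {
   uploadPart(algorithm.getCopied(), algorithm.getRemaining()); // trailing partial part
  }
 }
}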
