/**
 * Uploads a contiguous range of pages to the wrapped page blob.
 * No access condition is applied (the {@code null} argument below).
 *
 * @param sourceStream the stream supplying the page data to write
 * @param offset       byte offset in the blob at which writing begins
 * @param length       number of bytes to write
 * @param options      request options, or {@code null} for defaults
 * @param opContext    operation context, or {@code null} for defaults
 * @throws StorageException if the storage service reports an error
 * @throws IOException      if reading the source stream fails
 */
public void uploadPages(final InputStream sourceStream, final long offset, final long length, BlobRequestOptions options, OperationContext opContext) throws StorageException, IOException {
    final CloudPageBlob pageBlob = (CloudPageBlob) getBlob();
    pageBlob.uploadPages(sourceStream, offset, length, null /* accessCondition */, options, opContext);
}
/**
 * Writes one run of pages to the parent page blob.
 * Failures are never thrown from here; they are recorded in {@code this.lastError}
 * so the stream can report them on the next caller-facing operation.
 *
 * @param pageData    buffered page content to upload
 * @param offset      byte offset in the blob at which writing begins
 * @param writeLength number of bytes to write
 */
private void writePages(ByteArrayInputStream pageData, long offset, long writeLength) {
    final CloudPageBlob targetBlob = (CloudPageBlob) this.parentBlobRef;
    try {
        targetBlob.uploadPages(pageData, offset, writeLength, this.accessCondition, this.options, this.opContext);
    }
    catch (final IOException ioEx) {
        // Keep the IOException as-is for later surfacing.
        this.lastError = ioEx;
    }
    catch (final StorageException storageEx) {
        // Wrap so the stream contract only ever exposes IOExceptions.
        this.lastError = Utility.initIOException(storageEx);
    }
}
/**
 * Writes a page range to the underlying page blob, delegating to
 * {@link CloudPageBlob#uploadPages} without an access condition.
 *
 * @param sourceStream the input stream providing the page bytes
 * @param offset       starting byte offset within the blob
 * @param length       count of bytes to upload
 * @param options      blob request options ({@code null} = service defaults)
 * @param opContext    operation context ({@code null} = service defaults)
 * @throws StorageException on a storage service failure
 * @throws IOException      on a failure reading {@code sourceStream}
 */
public void uploadPages(final InputStream sourceStream, final long offset, final long length, BlobRequestOptions options, OperationContext opContext) throws StorageException, IOException {
    final CloudPageBlob target = (CloudPageBlob) getBlob();
    target.uploadPages(sourceStream, offset, length, null /* accessCondition */, options, opContext);
}
/**
 * Uploads a buffered run of pages to the parent blob, capturing any failure
 * in {@code this.lastError} instead of propagating it.
 *
 * @param pageData    in-memory page content
 * @param offset      byte offset in the blob where the write starts
 * @param writeLength number of bytes to upload
 */
private void writePages(ByteArrayInputStream pageData, long offset, long writeLength) {
    final CloudPageBlob destination = (CloudPageBlob) this.parentBlobRef;
    try {
        destination.uploadPages(pageData, offset, writeLength, this.accessCondition, this.options, this.opContext);
    }
    catch (final IOException e) {
        // Stash the IO failure; the stream surfaces it on a later operation.
        this.lastError = e;
    }
    catch (final StorageException e) {
        // Normalize storage failures to IOException for the stream's error slot.
        this.lastError = Utility.initIOException(e);
    }
}
/**
 * Uploads a range of contiguous pages, up to 4 MB in size, at the specified offset in the page blob.
 * <p>
 * Convenience overload: applies no access condition and uses the default request
 * options and operation context.
 *
 * @param sourceStream
 *            An {@link InputStream} object which represents the input stream to write to the page blob.
 * @param offset
 *            A <code>long</code> which represents the offset, in number of bytes, at which to begin writing the
 *            data. This value must be a multiple of 512.
 * @param length
 *            A <code>long</code> which represents the length, in bytes, of the data to write. This value must be a
 *            multiple of 512.
 *
 * @throws IllegalArgumentException
 *             If the offset or length are not multiples of 512, or if the length is greater than 4 MB.
 * @throws IOException
 *             If an I/O exception occurred.
 * @throws StorageException
 *             If a storage service error occurred.
 */
@DoesServiceRequest
public void uploadPages(final InputStream sourceStream, final long offset, final long length)
        throws StorageException, IOException {
    // Delegate to the full overload; the nulls select the service defaults.
    final AccessCondition noCondition = null;
    this.uploadPages(sourceStream, offset, length, noCondition, null /* options */, null /* opContext */);
}
/**
 * Uploads a range of contiguous pages, up to 4 MB in size, at the specified offset in the page blob.
 * <p>
 * Shorthand for the full overload with no access condition, default request
 * options, and default operation context.
 *
 * @param sourceStream
 *            An {@link InputStream} object which represents the input stream to write to the page blob.
 * @param offset
 *            A <code>long</code> which represents the offset, in number of bytes, at which to begin writing the
 *            data. This value must be a multiple of 512.
 * @param length
 *            A <code>long</code> which represents the length, in bytes, of the data to write. This value must be a
 *            multiple of 512.
 *
 * @throws IllegalArgumentException
 *             If the offset or length are not multiples of 512, or if the length is greater than 4 MB.
 * @throws IOException
 *             If an I/O exception occurred.
 * @throws StorageException
 *             If a storage service error occurred.
 */
@DoesServiceRequest
public void uploadPages(final InputStream sourceStream, final long offset, final long length)
        throws StorageException, IOException {
    // Defaults: no access condition, default options, default operation context.
    this.uploadPages(sourceStream, offset, length, null /* accessCondition */, null /* options */, null /* opContext */);
}
/**
 * Writes the list of live processors in the system to the blob.
 * Write is successful only if the lease ID passed is valid and the processor holds the lease.
 * Called only by the leader.
 * @param processors List of live processors to be published on the blob.
 * @param leaseId LeaseID of the valid lease that the processor holds on the blob. Null if there is no lease.
 * @return true if write to the blob is successful, false if leaseID is null or an Azure storage service error or IO exception occurred.
 */
public boolean publishLiveProcessorList(List<String> processors, String leaseId) {
    try {
        if (leaseId == null) {
            return false;
        }
        byte[] data = SamzaObjectMapper.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsBytes(processors);
        // Pad (or truncate) the payload to exactly the processor-list block size so the
        // stream length matches the page-upload length below. (Previously this padded
        // to BARRIER_STATE_BLOCK_SIZE, which is only correct when the two constants
        // happen to be equal.)
        byte[] pageData = Arrays.copyOf(data, (int) PROCESSOR_LIST_BLOCK_SIZE);
        InputStream is = new ByteArrayInputStream(pageData);
        // The processor-list block lives after the job-model and barrier-state blocks.
        // uploadPages succeeds only when the lease condition carries an active, valid lease ID.
        blob.uploadPages(is, JOB_MODEL_BLOCK_SIZE + BARRIER_STATE_BLOCK_SIZE, PROCESSOR_LIST_BLOCK_SIZE, AccessCondition.generateLeaseCondition(leaseId), null, null);
        LOG.info("Uploaded list of live processors to blob.");
        return true;
    } catch (StorageException | IOException e) {
        // Also fixed: the original message was missing the space before "publish".
        LOG.error("Processor list: " + processors + " publish failed", e);
        return false;
    }
}
/** * Writes the barrier state to the blob. * Write is successful only if the lease ID passed is valid and the processor holds the lease. * Called only by the leader. * @param state Barrier state to be published to the blob. * @param leaseId LeaseID of the valid lease that the processor holds on the blob. Null if there is no lease. * @return true if write to the blob is successful, false if leaseID is null or an Azure storage service error or IO exception occurred. */ public boolean publishBarrierState(String state, String leaseId) { try { if (leaseId == null) { return false; } byte[] data = SamzaObjectMapper.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsBytes(state); byte[] pageData = Arrays.copyOf(data, (int) BARRIER_STATE_BLOCK_SIZE); InputStream is = new ByteArrayInputStream(pageData); //uploadPages is only successful when the AccessCondition provided has an active and valid lease ID. It fails otherwise. blob.uploadPages(is, JOB_MODEL_BLOCK_SIZE, BARRIER_STATE_BLOCK_SIZE, AccessCondition.generateLeaseCondition(leaseId), null, null); LOG.info("Uploaded barrier state {} to blob", state); return true; } catch (StorageException | IOException e) { LOG.error("Barrier state " + state + " publish failed", e); return false; } }
/**
 * Writes the job model to the blob.
 * Write is successful only if the lease ID passed is valid and the processor holds the lease.
 * Called by the leader.
 * @param prevJM Previous job model that the processor was operating on.
 * @param currJM Current job model that the processor is operating on.
 * @param prevJMV Previous job model version that the processor was operating on.
 * @param currJMV Current job model version that the processor is operating on.
 * @param leaseId LeaseID of the lease that the processor holds on the blob. Null if there is no lease.
 * @return true if write to the blob is successful, false if leaseID is null or an Azure storage service error or IO exception occurred.
 */
public boolean publishJobModel(JobModel prevJM, JobModel currJM, String prevJMV, String currJMV, String leaseId) {
    try {
        if (leaseId == null) {
            return false;
        }
        // Bundle both job models and both versions so readers get a consistent snapshot.
        JobModelBundle bundle = new JobModelBundle(prevJM, currJM, prevJMV, currJMV);
        byte[] data = SamzaObjectMapper.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsBytes(bundle);
        // Pad the serialized bundle to the fixed job-model block size expected by the page upload.
        byte[] pageData = Arrays.copyOf(data, (int) JOB_MODEL_BLOCK_SIZE);
        InputStream is = new ByteArrayInputStream(pageData);
        // Job-model block sits at the start of the blob (offset 0).
        // The write only succeeds while the lease in the access condition is active and valid.
        blob.uploadPages(is, 0, JOB_MODEL_BLOCK_SIZE, AccessCondition.generateLeaseCondition(leaseId), null, null);
        LOG.info("Uploaded {} jobModel to blob", bundle.getCurrJobModel());
        return true;
    } catch (StorageException | IOException e) {
        LOG.error("JobModel publish failed for version = " + currJMV, e);
        return false;
    }
}
private CloudPageBlob setUpPageRanges() throws StorageException, URISyntaxException, IOException { int blobLengthToUse = 8 * 512; byte[] buffer = BlobTestHelper.getRandomBuffer(blobLengthToUse); String blobName = BlobTestHelper.generateRandomBlobNameWithPrefix("testblob"); final CloudPageBlob blobRef = this.container.getPageBlobReference(blobName); blobRef.create(blobLengthToUse); // Upload page 0 ByteArrayInputStream inputStream = new ByteArrayInputStream(buffer); blobRef.uploadPages(inputStream, 0, 512); // Upload pages 2-4 inputStream = new ByteArrayInputStream(buffer, 512, 3 * 512); blobRef.uploadPages(inputStream, 2 * 512, 3 * 512); // Upload page 6 inputStream = new ByteArrayInputStream(buffer, 3 * 512, 512); blobRef.uploadPages(inputStream, 6 * 512, 512); // Page0: 512 bytes should be the first 512 bytes of the random buffer (page 0) // Page1: 512 bytes should be 0 // Page2-4: 3 * 512 bytes should be equal to bytes (512 -> 4 * 512) of the random buffer (pages 2-4) // Page5: 512 bytes should be 0 // Page6: 512 bytes should be the 4th 512 byte segmented of the random buffer // Page7-8: 2 * 512 bytes should be 0 return blobRef; }
/**
 * Uploads a range of pages to the given page blob.
 * The range comes from, in priority order: the PAGE_BLOB_RANGE message header,
 * then the endpoint configuration; a missing length falls back to
 * {@code is.available()}.
 *
 * @param client   page blob to write to
 * @param is       stream supplying the page data
 * @param opts     access condition / request options / operation context holder
 * @param exchange the in-flight Camel exchange (source of the optional range header)
 */
private void doUpdatePageBlob(CloudPageBlob client, InputStream is, BlobServiceRequestOptions opts, Exchange exchange) throws Exception {
    Long offset = getConfiguration().getBlobOffset();
    Long length = getConfiguration().getDataLength();

    // A PAGE_BLOB_RANGE header on the message overrides both configured values.
    final PageRange range = exchange.getIn().getHeader(BlobServiceConstants.PAGE_BLOB_RANGE, PageRange.class);
    if (range != null) {
        offset = range.getStartOffset();
        length = range.getEndOffset() - range.getStartOffset();
    }
    if (length == null) {
        // Fall back to whatever the stream reports as immediately available.
        length = (long) is.available();
    }
    // NOTE(review): offset may still be null here if neither the configuration nor
    // the header supplies it, which would NPE on unboxing inside uploadPages —
    // confirm the configuration guarantees a default offset.
    try {
        client.uploadPages(is, offset, length, opts.getAccessCond(), opts.getRequestOpts(), opts.getOpContext());
    } finally {
        // Close the stream unless the endpoint is configured to keep it open.
        closeInputStreamIfNeeded(is);
    }
}
@Test
public void testDownloadPageRangeDiffWithOffsetAndLength() throws StorageException, URISyntaxException, IOException {
    final CloudPageBlob blobRef = setUpPageRanges();
    final CloudPageBlob snapshot = (CloudPageBlob) blobRef.createSnapshot();

    // Overwrite page 0 (offset 0, 512 bytes) so it differs from the snapshot.
    // (The original comment said "Add page 1", but the write targets page 0.)
    InputStream inputStream = new ByteArrayInputStream(BlobTestHelper.getRandomBuffer(512));
    blobRef.uploadPages(inputStream, 0, 512);

    // Diff restricted to the first 5 pages; only the rewritten page 0 should appear.
    List<PageRangeDiff> actualPageRanges = blobRef.downloadPageRangesDiff(snapshot.getSnapshotID(), (long) 0,
            (long) 5 * 512, null, null, null);

    List<PageRangeDiff> expectedPageRanges = new ArrayList<PageRangeDiff>();
    expectedPageRanges.add(new PageRangeDiff(0, 512 - 1, false));

    assertEquals(expectedPageRanges.size(), actualPageRanges.size());
    for (int i = 0; i < expectedPageRanges.size(); i++) {
        assertEquals(expectedPageRanges.get(i).getStartOffset(), actualPageRanges.get(i).getStartOffset());
        assertEquals(expectedPageRanges.get(i).getEndOffset(), actualPageRanges.get(i).getEndOffset());
        assertEquals(expectedPageRanges.get(i).isCleared(), actualPageRanges.get(i).isCleared());
    }
}
@Test public void testDownloadPageRangeDiff() throws StorageException, URISyntaxException, IOException { final CloudPageBlob blobRef = setUpPageRanges(); final CloudPageBlob snapshot = (CloudPageBlob) blobRef.createSnapshot(); // Add page 1 InputStream inputStream = new ByteArrayInputStream(BlobTestHelper.getRandomBuffer(512)); inputStream = new ByteArrayInputStream(BlobTestHelper.getRandomBuffer(512)); blobRef.uploadPages(inputStream, 0, 512); // Clear page 6 blobRef.clearPages(6 * 512, 512); List<PageRangeDiff> actualPageRanges = blobRef.downloadPageRangesDiff(snapshot.getSnapshotID()); List<PageRangeDiff> expectedPageRanges = new ArrayList<PageRangeDiff>(); expectedPageRanges.add(new PageRangeDiff(0, 512 - 1, false)); expectedPageRanges.add(new PageRangeDiff(6 * 512, 7 * 512 - 1, true)); assertEquals(expectedPageRanges.size(), actualPageRanges.size()); for (int i = 0; i < expectedPageRanges.size(); i++) { assertEquals(expectedPageRanges.get(i).getStartOffset(), actualPageRanges.get(i).getStartOffset()); assertEquals(expectedPageRanges.get(i).getEndOffset(), actualPageRanges.get(i).getEndOffset()); assertEquals(expectedPageRanges.get(i).isCleared(), actualPageRanges.get(i).isCleared()); } }
// Valid, 512-aligned uploads: each should succeed and the service should
// assign a page-blob sequence number.
blobRef.uploadPages(inputStream, 0, 512);
assertNotNull(blobRef.getProperties().getPageBlobSequenceNumber());
blobRef.uploadPages(inputStream, 2 * 512, 3 * 512);
assertNotNull(blobRef.getProperties().getPageBlobSequenceNumber());
// NOTE(review): the two calls below are expected to throw (length 256 is not a
// multiple of 512; offset 3 * 256 is not 512-aligned), but no try/catch is
// visible in this chunk — as written the exception would propagate before
// fail() ever runs. Confirm the enclosing test wraps each call in
// try { ... fail(...); } catch (IllegalArgumentException e) { } .
blobRef.uploadPages(inputStream, 0, 256);
fail("Did not throw expected exception on non-512-byte-aligned length");
blobRef.uploadPages(inputStream, 3 * 256, 3 * 512);
fail("Did not throw expected exception on non-512-byte-aligned offset");
@Test
public void testEightTBBlob() throws StorageException, URISyntaxException, IOException {
    final long eightTb = 8L * 1024L * 1024L * 1024L * 1024L;

    // Create an 8 TB page blob and verify the length on the creating reference.
    CloudPageBlob created = this.container.getPageBlobReference("blob1");
    created.create(eightTb);
    assertEquals(eightTb, created.getProperties().getLength());

    // A second reference to the same blob must see the same length after a fetch.
    CloudPageBlob fetched = this.container.getPageBlobReference("blob1");
    fetched.downloadAttributes();
    assertEquals(eightTb, fetched.getProperties().getLength());

    // Listing must also report the 8 TB length.
    for (ListBlobItem item : this.container.listBlobs()) {
        assertEquals(eightTb, ((CloudPageBlob) item).getProperties().getLength());
    }

    // A small page blob can be resized up to 8 TB.
    CloudPageBlob resized = this.container.getPageBlobReference("blob3");
    resized.create(1024);
    resized.resize(eightTb);

    // Write the final 512-byte page and confirm it is the only populated range.
    final byte[] payload = new byte[1024];
    new Random().nextBytes(payload);
    created.uploadPages(new ByteArrayInputStream(payload), eightTb - 512L, 512L);
    ArrayList<PageRange> ranges = created.downloadPageRanges();
    assertEquals(1, ranges.size());
    assertEquals(eightTb - 512L, ranges.get(0).getStartOffset());
    assertEquals(eightTb - 1L, ranges.get(0).getEndOffset());
}
// Fill the entire blob in a single page upload, then confirm the service
// assigned a page-blob sequence number to the properties.
blobRef.uploadPages(inputStream, 0, blobLengthToUse);
assertNotNull(blobRef.getProperties().getPageBlobSequenceNumber());