/**
 * Creates a batch backed by the given {@link Storage} client.
 *
 * <p>The first underlying {@code BatchRequest} is created eagerly and is built with the shared
 * {@code batchRequestInitializer} so that the batch carries the OpenCensus
 * {@code HttpRequestInitializer}.
 *
 * @param storage the Storage client used to create the underlying batch requests
 */
private DefaultRpcBatch(Storage storage) {
  this.storage = storage;
  this.batches = new LinkedList<>();
  // Seed the queue with an initial batch carrying the OpenCensus HttpRequestInitializer.
  this.batches.add(storage.batch(batchRequestInitializer));
}
/**
 * Queues a delete request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object to delete
 * @param callback invoked with the outcome when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addDelete(
    StorageObject storageObject, RpcBatch.Callback<Void> callback, Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    deleteCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
/**
 * Queues a metadata-get request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object whose metadata is requested
 * @param callback invoked with the fetched {@code StorageObject} when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addGet(
    StorageObject storageObject,
    RpcBatch.Callback<StorageObject> callback,
    Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    getCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
/**
 * Queues a patch request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object to patch
 * @param callback invoked with the updated {@code StorageObject} when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addPatch(
    StorageObject storageObject,
    RpcBatch.Callback<StorageObject> callback,
    Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    patchCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
/**
 * Builds a new {@link BatchRequest} from the underlying client, using the configured
 * {@code httpRequestInitializer}.
 */
private BatchRequest createBatchRequest() {
  BatchRequest batchRequest = storageClient.batch(httpRequestInitializer);
  return batchRequest;
}
/**
 * Constructs a batch wrapper around the given {@link Storage} client.
 *
 * <p>Immediately enqueues one underlying {@code BatchRequest}, built with the shared
 * {@code batchRequestInitializer} so the batch carries the OpenCensus
 * {@code HttpRequestInitializer}.
 *
 * @param storage the Storage client that produces the underlying batch requests
 */
private DefaultRpcBatch(Storage storage) {
  this.storage = storage;
  this.batches = new LinkedList<>();
  // First batch is created up front with the OpenCensus HttpRequestInitializer attached.
  this.batches.add(storage.batch(batchRequestInitializer));
}
/**
 * Queues a delete request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object to delete
 * @param callback invoked with the outcome when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addDelete(
    StorageObject storageObject, RpcBatch.Callback<Void> callback, Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    deleteCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
/**
 * Queues a metadata-get request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object whose metadata is requested
 * @param callback invoked with the fetched {@code StorageObject} when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addGet(
    StorageObject storageObject,
    RpcBatch.Callback<StorageObject> callback,
    Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    getCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
/**
 * Queues a patch request for {@code storageObject} into the current batch.
 *
 * <p>When the current batch reaches {@link #MAX_BATCH_SIZE}, a fresh batch is started. The fresh
 * batch is created with the same {@code batchRequestInitializer} used by the constructor, so
 * every overflow batch keeps the OpenCensus instrumentation (previously {@code storage.batch()}
 * was called without the initializer, dropping instrumentation after the first batch).
 *
 * @param storageObject the object to patch
 * @param callback invoked with the updated {@code StorageObject} when the batch is executed
 * @param options RPC options applied to this call
 */
@Override
public void addPatch(
    StorageObject storageObject,
    RpcBatch.Callback<StorageObject> callback,
    Map<Option, ?> options) {
  try {
    if (currentBatchSize == MAX_BATCH_SIZE) {
      // Overflow into a new batch; pass the initializer for consistency with the constructor.
      batches.add(storage.batch(batchRequestInitializer));
      currentBatchSize = 0;
    }
    patchCall(storageObject, options).queue(batches.getLast(), toJsonCallback(callback));
    currentBatchSize++;
  } catch (IOException ex) {
    throw translate(ex);
  }
}
private void flushPendingRequests() throws IOException { if (pendingRequests.isEmpty()) { return; } BatchRequest batch = gcs.batch(requestInitializer); while (batch.size() < maxRequestsPerBatch && !pendingRequests.isEmpty()) { // enqueue request at head pendingRequests.remove().enqueue(batch); } responseFutures.add( requestsExecutor.submit( () -> { batch.execute(); return null; })); }
// Create the Storage service object Storage storage = new Storage(httpTransport, jsonFactory, credential); // Create a new batch request BatchRequest batch = storage.batch(); // Add some requests to the batch request storage.objectAccessControls().insert("bucket-name", "object-key1", new ObjectAccessControl().setEntity("user-123423423").setRole("READER")) .queue(batch, callback); storage.objectAccessControls().insert("bucket-name", "object-key2", new ObjectAccessControl().setEntity("user-guy@example.com").setRole("READER")) .queue(batch, callback); storage.objectAccessControls().insert("bucket-name", "object-key3", new ObjectAccessControl().setEntity("group-foo@googlegroups.com").setRole("OWNER")) .queue(batch, callback); // Execute the batch request. The individual callbacks will be called when requests finish. batch.execute();
private BatchResponse batchChunk(BatchRequest request) { com.google.api.client.googleapis.batch.BatchRequest batch = storage.batch(); final Map<StorageObject, Tuple<Boolean, StorageException>> deletes = Maps.newConcurrentMap();