@Override
public BulkCommand getCommand(String commandId) {
    // Look up the serialized command in the key/value store; null when absent.
    byte[] commandAsBytes = getKvStore().get(COMMAND_PREFIX + commandId);
    return commandAsBytes == null ? null : BulkCodecs.getCommandCodec().decode(commandAsBytes);
}
@Override
public BulkStatus getStatus(String commandId) {
    // A missing entry in the key/value store means the command is unknown here.
    byte[] bytes = getKvStore().get(STATUS_PREFIX + commandId);
    if (bytes != null) {
        return BulkCodecs.getStatusCodec().decode(bytes);
    }
    log.debug("Request status of unknown command: {}", commandId);
    return BulkStatus.unknownOf(commandId);
}
@Override
public void processRecord(ComputationContext context, String documentIdsStreamName, Record record) {
    // Accumulate incoming data buckets per command; once the running count
    // reaches the expected total for that command, finalize its blob.
    Codec<DataBucket> codec = BulkCodecs.getDataBucketCodec();
    DataBucket in = codec.decode(record.getData());
    String commandId = in.getCommandId();
    long nbDocuments = in.getCount();
    appendToFile(commandId, in.getData());
    // Map.merge replaces the containsKey/get/put sequence with a single idiomatic update
    counters.merge(commandId, nbDocuments, Long::sum);
    lastBuckets.put(commandId, in);
    if (counters.get(commandId) < getTotal(commandId)) {
        // still waiting for more buckets of this command
        return;
    }
    finishBlob(context, commandId);
}
@Override
public LogRecord<M> read(Duration timeout) throws InterruptedException {
    // Read the next record, polling the broker for up to {@code timeout} when the
    // local buffer is empty. Returns null when nothing arrives in time.
    // Throws RebalanceException when a partition rebalance happened during poll.
    if (closed) {
        throw new IllegalStateException("The tailer has been closed.");
    }
    if (records.isEmpty()) {
        int items = poll(timeout);
        // a rebalance invalidates the current assignment, surface it to the caller
        if (isRebalanced) {
            isRebalanced = false;
            log.debug("Rebalance happens during poll, raising exception");
            throw new RebalanceException("Partitions has been rebalanced");
        }
        if (items == 0) {
            if (log.isTraceEnabled()) {
                log.trace("No data " + id + " after " + timeout.toMillis() + " ms");
            }
            return null;
        }
    }
    ConsumerRecord<String, Bytes> record = records.poll();
    // remember the last offset seen on each partition (commit bookkeeping)
    lastOffsets.put(new TopicPartition(record.topic(), record.partition()), record.offset());
    M value = decodeCodec.decode(record.value().get());
    LogPartition partition = LogPartition.of(ns.getLogName(record.topic()), record.partition());
    LogOffset offset = new LogOffsetImpl(partition, record.offset());
    consumerMoved = false;
    if (log.isDebugEnabled()) {
        log.debug(String.format("Read from %s/%s, key: %s, value: %s", offset, group, record.key(), value));
    }
    return new LogRecord<>(value, offset);
}
@SuppressWarnings("unchecked") protected LogRecord<M> read() { if (closed) { throw new IllegalStateException("The tailer has been closed."); } List<M> value = new ArrayList<>(1); long offset = cqTailer.index(); if (NO_CODEC.equals(codec)) { // default format to keep backward compatibility try { if (!cqTailer.readDocument(w -> value.add((M) w.read(MSG_KEY).object()))) { return null; } } catch (ClassCastException e) { throw new IllegalArgumentException(e); } } else { if (!cqTailer.readDocument(w -> value.add(codec.decode(w.read().bytes())))) { return null; } } return new LogRecord<>(value.get(0), new LogOffsetImpl(partition, offset)); }
@Override
public void processRecord(ComputationContext context, String inputStream, Record record) {
    // On a completed index action: log it, then refresh the index and update
    // the alias when the command requires it. Always checkpoint.
    BulkStatus status = codec.decode(record.getData());
    boolean completedIndexAction = IndexAction.ACTION_NAME.equals(status.getAction())
            && BulkStatus.State.COMPLETED.equals(status.getState());
    if (completedIndexAction) {
        logIndexing(status);
        BulkCommand command = Framework.getService(BulkService.class).getCommand(status.getId());
        refreshIndexIfNeeded(command);
        updateAliasIfNeeded(command);
    }
    context.askForCheckpoint();
}
@Override public void processRecord(ComputationContext context, String inputStreamName, Record record) { Codec<DataBucket> codec = BulkCodecs.getDataBucketCodec(); DataBucket in = codec.decode(record.getData());
@Override
public void processRecord(ComputationContext context, String inputStream, Record record) {
    // Feed every write request of the bucket to the bulk processor and report
    // the number of processed documents as a status delta.
    DataBucket bucket = codec.decode(record.getData());
    if (bucket.getCount() > 0) {
        BulkRequest bulkRequest = decodeRequest(bucket);
        bulkRequest.requests().forEach(bulkProcessor::add);
        BulkStatus delta = BulkStatus.deltaOf(bucket.getCommandId());
        delta.setProcessed(bucket.getCount());
        AbstractBulkComputation.updateStatus(context, delta);
    }
    updates = true;
}
@Override public void processRecord(ComputationContext context, String inputStreamName, Record record) { BulkBucket bucket = BulkCodecs.getBucketCodec().decode(record.getData()); command = getCommand(bucket.getCommandId()); if (command != null) { delta = BulkStatus.deltaOf(command.getId()); delta.setProcessingStartTime(Instant.now()); delta.setProcessed(bucket.getIds().size()); startBucket(record.getKey()); for (List<String> batch : Lists.partition(bucket.getIds(), command.getBatchSize())) { processBatchOfDocuments(batch); } delta.setProcessingEndTime(Instant.now()); endBucket(context, delta); context.askForCheckpoint(); } else { if (isAbortedCommand(bucket.getCommandId())) { log.debug("Skipping aborted command: {}", bucket.getCommandId()); context.askForCheckpoint(); } else { // this requires a manual intervention, the kv store might have been lost log.error("Stopping processing, unknown command: {}, offset: {}, record: {}.", bucket.getCommandId(), context.getLastOffset(), record); context.askForTermination(); } } }
protected void processRecord(ComputationContext context, Record record) { BulkCommand command = null; try { command = BulkCodecs.getCommandCodec().decode(record.getData()); String commandId = command.getId(); int bucketSize = command.getBucketSize() > 0 ? command.getBucketSize()
@Override public void processRecord(ComputationContext context, String documentIdsStreamName, Record record) { Codec<DataBucket> codec = BulkCodecs.getDataBucketCodec(); DataBucket in = codec.decode(record.getData()); String commandId = in.getCommandId(); long documents = in.getCount(); String storeName = Framework.getService(BulkService.class).getStatus(commandId).getAction(); Blob blob = getBlob(in.getDataAsString(), storeName); // store it in download transient store TransientStore store = Framework.getService(TransientStoreService.class) .getStore(DownloadService.TRANSIENT_STORE_STORE_NAME); store.putBlobs(commandId, Collections.singletonList(blob)); store.setCompleted(commandId, true); // update the command status BulkStatus delta = BulkStatus.deltaOf(commandId); delta.setProcessed(documents); String url = Framework.getService(DownloadService.class).getDownloadUrl(commandId); Map<String, Serializable> result = Collections.singletonMap("url", url); delta.setResult(result); AbstractBulkComputation.updateStatus(context, delta); context.askForCheckpoint(); }
@Override
public void processRecord(ComputationContext context, String inputStreamName, Record record) {
    // Zip the produced blob (best effort), persist it in the transient store,
    // and forward a bucket pointing at the stored key.
    Codec<DataBucket> codec = BulkCodecs.getDataBucketCodec();
    DataBucket in = codec.decode(record.getData());
    String commandId = in.getCommandId();
    String storeName = Framework.getService(BulkService.class).getStatus(commandId).getAction();
    Blob blob = getBlob(in.getDataAsString(), storeName);
    try {
        blob = BlobUtils.zip(blob, blob.getFilename() + ".zip");
    } catch (IOException e) {
        // best effort: fall back to the unzipped blob
        log.error("Unable to zip blob", e);
    }
    storeBlob(blob, commandId, storeName);
    DataBucket out = new DataBucket(commandId, in.getCount(), getTransientStoreKey(commandId));
    context.produceRecord(OUTPUT_1, Record.of(commandId, codec.encode(out)));
    context.askForCheckpoint();
}
@Override public void processRecord(ComputationContext context, String inputStreamName, Record record) { Codec<BulkStatus> codec = BulkCodecs.getStatusCodec(); BulkStatus recordStatus = codec.decode(record.getData()); BulkServiceImpl bulkService = (BulkServiceImpl) Framework.getService(BulkService.class); BulkStatus status; if (!recordStatus.isDelta()) { status = recordStatus; } else { status = bulkService.getStatus(recordStatus.getId()); if (UNKNOWN.equals(status.getState())) { // this requires a manual intervention, the kv store might have been lost log.error("Stopping processing, unknown status for command: {}, offset: {}, record: {}.", recordStatus.getId(), context.getLastOffset(), record); context.askForTermination(); return; } status.merge(recordStatus); } byte[] statusAsBytes = bulkService.setStatus(status); if (status.getState() == COMPLETED || recordStatus.getState() == ABORTED) { context.produceRecord(OUTPUT_1, status.getId(), statusAsBytes); } context.askForCheckpoint(); } }