/**
 * Constructs a {@code FederatedOperationChain} that wraps the supplied
 * operations in a new {@link OperationChain}.
 *
 * @param operations the operations to be executed as a chain
 */
public FederatedOperationChain(final Operation... operations) {
    this(new OperationChain(operations));
}
/**
 * Constructs a {@code FederatedOperationChain} backed by an empty
 * {@link OperationChain}.
 */
public FederatedOperationChain() {
    this(new OperationChain());
}
/**
 * Creates a builder initialised with a {@code FederatedOperationChain}
 * wrapping an empty {@link OperationChain}.
 */
public Builder() {
    super(new FederatedOperationChain<>(new OperationChain()));
}
/**
 * Applies the configured schema migrations to the view of a single operation.
 * <p>
 * The operation must implement {@link OperationView} (a plain cast is used, so
 * a non-view operation fails with {@link ClassCastException}). The view's
 * entity and edge definitions are migrated, merged back into an updated view
 * and set on the operation in place. View validation is explicitly skipped on
 * the rebuilt view, since migrated definitions may not pass standard
 * validation. Any follow-up operations required to complete the migration are
 * returned.
 *
 * @param op the operation whose view is migrated; its view is mutated in place
 * @return the additional operations needed to apply the migration
 */
private List<Operation> migrateOperation(final Operation op) {
    // Idiomatic plain cast instead of OperationView.class.cast(op);
    // behaviour (ClassCastException on mismatch) is identical.
    final OperationView opView = (OperationView) op;
    final Map<String, ViewMigration> migratedEntities = migrateViewElements(entities, opView.getView()::getEntity);
    final Map<String, ViewMigration> migratedEdges = migrateViewElements(edges, opView.getView()::getEdge);
    final View.Builder viewBuilder = new View.Builder().merge(opView.getView());
    for (final Map.Entry<String, ViewMigration> entry : migratedEntities.entrySet()) {
        viewBuilder.entity(entry.getKey(), entry.getValue().buildViewElementDefinition());
    }
    for (final Map.Entry<String, ViewMigration> entry : migratedEdges.entrySet()) {
        viewBuilder.edge(entry.getKey(), entry.getValue().buildViewElementDefinition());
    }
    // Migrated views may reference definitions that fail normal validation.
    viewBuilder.config(ViewValidator.SKIP_VIEW_VALIDATION, TRUE);
    final View updatedView = viewBuilder.build();
    LOGGER.debug("Migrated view: {}", updatedView);
    opView.setView(updatedView);
    final List<Operation> migrationOps = ViewMigration.createMigrationOps(aggregateAfter, migratedEdges.values(), migratedEntities.values());
    if (LOGGER.isDebugEnabled()) {
        try {
            LOGGER.debug("Migrated operations: {}", StringUtil.toString(JSONSerialiser.serialise(new OperationChain<>(migrationOps), true)));
        } catch (final SerialisationException e) {
            // Pass the exception as the final argument so SLF4J logs the
            // stack trace rather than silently dropping the cause.
            LOGGER.debug("Failed to json serialise the migration operations: {}", new OperationChain<>(migrationOps), e);
        }
    }
    return migrationOps;
}
/**
 * Imports previously prepared Accumulo key/value files into the store.
 * <p>
 * Both the output path and the failure path options must be supplied on the
 * operation; the key/value files are prepared (partitioned by the store's key
 * ranges) and then bulk imported via {@code ImportAccumuloKeyValueFiles}.
 *
 * @param operation the operation carrying the output/failure path options
 * @param context the operation execution context
 * @param store the target Accumulo store
 * @throws OperationException if either required path option is missing/empty
 */
public void doOperation(final OP operation, final Context context, final AccumuloStore store) throws OperationException {
    final String output = getOutputPath(operation);
    if (null == output || output.isEmpty()) {
        throw new OperationException("Option outputPath must be set for this option to be run against the accumulostore");
    }
    final String failure = getFailurePath(operation);
    if (null == failure || failure.isEmpty()) {
        throw new OperationException("Option failurePath must be set for this option to be run against the accumulostore");
    }
    prepareKeyValues(operation, new AccumuloKeyRangePartitioner(store));
    final ImportAccumuloKeyValueFiles importFiles = new ImportAccumuloKeyValueFiles.Builder()
            .inputPath(output)
            .failurePath(failure)
            .build();
    store.execute(new OperationChain<>(importFiles), context);
}
/**
 * Imports an RDD of elements into the Accumulo store.
 * <p>
 * The elements are converted to Accumulo key/value pairs using the store's
 * key converter (distributed to executors via a Spark broadcast), then written
 * and bulk imported through {@code ImportKeyValuePairRDDToAccumulo}. The
 * operation must carry non-empty output and failure path options.
 *
 * @param operation the import operation providing the input RDD and options
 * @param context the operation execution context
 * @param store the target Accumulo store
 * @throws OperationException if either required path option is missing/empty
 */
public void doOperation(final ImportRDDOfElements operation, final Context context, final AccumuloStore store) throws OperationException {
    final String output = operation.getOption(OUTPUT_PATH);
    if (null == output || output.isEmpty()) {
        throw new OperationException("Option outputPath must be set for this option to be run against the accumulostore");
    }
    final String failure = operation.getOption(FAILURE_PATH);
    if (null == failure || failure.isEmpty()) {
        throw new OperationException("Option failurePath must be set for this option to be run against the accumulostore");
    }
    // Broadcast the element converter so each executor shares one copy.
    final Broadcast<AccumuloElementConverter> broadcastConverter = SparkContextUtil
            .getSparkSession(context, store.getProperties())
            .sparkContext()
            .broadcast(store.getKeyPackage().getKeyConverter(), ACCUMULO_ELEMENT_CONVERTER_CLASS_TAG);
    final ElementConverterFunction converterFunc = new ElementConverterFunction(broadcastConverter);
    final RDD<Tuple2<Key, Value>> keyValues = operation.getInput().flatMap(converterFunc, TUPLE2_CLASS_TAG);
    final ImportKeyValuePairRDDToAccumulo importOp = new ImportKeyValuePairRDDToAccumulo.Builder()
            .input(keyValues)
            .failurePath(failure)
            .outputPath(output)
            .build();
    store.execute(new OperationChain<>(importOp), context);
}
}
/**
 * Imports a JavaRDD of elements into the Accumulo store.
 * <p>
 * The elements are converted to Accumulo key/value pairs using the store's
 * key converter (distributed to executors via a Spark broadcast), then written
 * and bulk imported through {@code ImportKeyValueJavaPairRDDToAccumulo}. The
 * operation must carry non-empty output and failure path options.
 *
 * @param operation the import operation providing the input JavaRDD and options
 * @param context the operation execution context
 * @param store the target Accumulo store
 * @throws OperationException if either required path option is missing/empty
 */
public void doOperation(final ImportJavaRDDOfElements operation, final Context context, final AccumuloStore store) throws OperationException {
    final String outputPath = operation.getOption(OUTPUT_PATH);
    if (null == outputPath || outputPath.isEmpty()) {
        throw new OperationException("Option outputPath must be set for this option to be run against the accumulostore");
    }
    final String failurePath = operation.getOption(FAILURE_PATH);
    if (null == failurePath || failurePath.isEmpty()) {
        throw new OperationException("Option failurePath must be set for this option to be run against the accumulostore");
    }
    final SparkContext sparkContext = SparkContextUtil.getSparkSession(context, store.getProperties()).sparkContext();
    // Broadcast the element converter so each executor shares one copy.
    final Broadcast<AccumuloElementConverter> broadcast = JavaSparkContext.fromSparkContext(sparkContext).broadcast(store.getKeyPackage().getKeyConverter());
    final ElementConverterFunction func = new ElementConverterFunction(broadcast);
    final JavaPairRDD<Key, Value> rdd = operation.getInput().flatMapToPair(func);
    final ImportKeyValueJavaPairRDDToAccumulo op = new ImportKeyValueJavaPairRDDToAccumulo.Builder()
            .input(rdd)
            .failurePath(failurePath)
            .outputPath(outputPath)
            .build();
    // Use the diamond operator: the raw OperationChain caused an unchecked
    // warning and was inconsistent with the RDD variant of this handler.
    store.execute(new OperationChain<>(op), context);
}
}