public CloseableIterable<? extends Element> doOperation(final GetAllElements operation, final User user, final AccumuloStore store) throws OperationException {
    try {
        return new AccumuloAllElementsRetriever(store, operation, user);
    } catch (final IteratorSettingException | StoreException e) {
        throw new OperationException("Failed to get elements", e);
    }
}
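For context, a minimal sketch of how a client typically reaches this handler; a configured Graph named graph and a User named user are assumed to exist, and the store dispatches GetAllElements to the handler above:

    // Usage sketch (assumption: "graph" and "user" are set up elsewhere;
    // GetAllElements is uk.gov.gchq.gaffer.operation.impl.get.GetAllElements).
    final GetAllElements getAll = new GetAllElements.Builder().build();
    final CloseableIterable<? extends Element> allElements = graph.execute(getAll, user);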
        restart = true;
    } catch (final OperationException e) {
        throw new RuntimeException(e.getMessage(), e);
@Override
public Schema doOperation(final GetSchema operation, final Context context, final Store store) throws OperationException {
    if (null == operation) {
        throw new OperationException("Operation cannot be null");
    }
    return ((FederatedStore) store).getSchema(operation, context);
}
public static void invokeSplitPointCalculations(final ExecutorService pool,
                                                final List<Callable<Tuple2<String, Map<Object, Integer>>>> tasks,
                                                final Map<String, Map<Object, Integer>> groupToSplitPoints)
        throws OperationException {
    try {
        final List<Future<Tuple2<String, Map<Object, Integer>>>> results = pool.invokeAll(tasks);
        for (int i = 0; i < tasks.size(); i++) {
            final Tuple2<String, Map<Object, Integer>> result = results.get(i).get();
            if (null != result) {
                final Map<Object, Integer> splitPoints = result._2;
                if (!splitPoints.isEmpty()) {
                    groupToSplitPoints.put(result._1, splitPoints);
                }
            }
        }
    } catch (final Exception e) {
        throw new OperationException(e.getMessage(), e);
    }
}
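A possible caller for the method above might look like the following sketch; the pool size is illustrative and the tasks list is assumed to have been built elsewhere:

    // Hypothetical caller: run the split-point tasks on a fixed pool and make
    // sure the pool is shut down afterwards. "tasks" is assumed to be a
    // pre-built List<Callable<Tuple2<String, Map<Object, Integer>>>>.
    final ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    final Map<String, Map<Object, Integer>> groupToSplitPoints = new HashMap<>();
    try {
        invokeSplitPointCalculations(pool, tasks, groupToSplitPoints);
    } finally {
        pool.shutdown();
    }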
protected Configuration getConfiguration(final OP operation) throws OperationException {
    final Configuration conf = new Configuration();
    final String serialisedConf = operation.getOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY);
    if (null != serialisedConf) {
        try {
            final ByteArrayInputStream bais = new ByteArrayInputStream(serialisedConf.getBytes(CommonConstants.UTF_8));
            conf.readFields(new DataInputStream(bais));
        } catch (final IOException e) {
            throw new OperationException("Exception decoding Configuration from options", e);
        }
    }
    return conf;
}
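The encoding side is not shown in this section. For readFields above to succeed, the option value would have to be produced by the inverse Writable call; a sketch under that assumption (addOption is also an assumption here; setting the option via the operation builder would be equivalent):

    // Sketch of the matching encode step (assumption: Configuration.write(...)
    // is the inverse of the readFields(...) call above; the enclosing method
    // is assumed to declare throws IOException).
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    conf.write(new DataOutputStream(baos));
    operation.addOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY,
            new String(baos.toByteArray(), CommonConstants.UTF_8));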
private void moveData(final FileSystem fs, final String tempFileDir, final String group,
                      final String column, final String splitNumber)
        throws StoreException, IOException, OperationException {
    // Move data from temp to data
    final String sourceFile = ParquetStore.getGroupDirectory(group, column, tempFileDir)
            + SORTED + SPLIT + splitNumber + "/part-00000-*.parquet";
    final FileStatus[] files = fs.globStatus(new Path(sourceFile));
    if (files.length == 1) {
        final Path destPath = new Path(ParquetStore.getGroupDirectory(group, column, tempFileDir + SORTED)
                + "/part-" + zeroPad(splitNumber, 5) + ".gz.parquet");
        fs.mkdirs(destPath.getParent());
        fs.rename(files[0].getPath(), destPath);
    } else if (files.length > 1) {
        throw new OperationException("Expected to get only one file which matched the file pattern " + sourceFile);
    }
}
protected Configuration getConfiguration(final OP operation) throws OperationException {
    final String serialisedConf = operation.getOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY);
    if (null == serialisedConf) {
        return new Configuration();
    }
    try {
        return AbstractGetRDDHandler.convertStringToConfiguration(serialisedConf);
    } catch (final IOException e) {
        throw new OperationException("Exception decoding Configuration from options", e);
    }
}
public CloseableIterable<? extends Element> doOperation(final GetElements operation, final User user, final AccumuloStore store) throws OperationException {
    if (null != operation.getOption("accumulostore.operation.return_matched_id_as_edge_source")) {
        throw new IllegalArgumentException("The accumulostore.operation.return_matched_id_as_edge_source option has been removed. "
                + "Instead of flipping the Edges around, the result Edges will have a matchedVertex field set specifying whether the SOURCE or DESTINATION was matched.");
    }
    if (null == operation.getInput()) {
        throw new OperationException("Operation input is undefined - please specify an input.");
    }
    try {
        return new AccumuloElementsRetriever(store, operation, user);
    } catch (final IteratorSettingException | StoreException e) {
        throw new OperationException("Failed to get elements", e);
    }
}
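A hypothetical seeded query reaching this handler might look like the sketch below; graph and user are assumed, and the result Edges carry a matchedVertex field rather than being flipped, as the removed-option message above explains:

    // Usage sketch (assumption: "graph" and "user" exist; EntitySeed is
    // uk.gov.gchq.gaffer.operation.data.EntitySeed).
    final GetElements getElements = new GetElements.Builder()
            .input(new EntitySeed("vertex1"))
            .build();
    final CloseableIterable<? extends Element> results = graph.execute(getElements, user);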
private void fetchElements(final AddElementsFromHdfs operation, final AccumuloStore store) throws OperationException {
    final AddElementsFromHdfsTool fetchTool = new AddElementsFromHdfsTool(new AccumuloAddElementsFromHdfsJobFactory(), operation, store);
    final int response;
    try {
        LOGGER.info("Running FetchElementsFromHdfsTool job");
        response = ToolRunner.run(fetchTool.getConfig(), fetchTool, new String[0]);
        LOGGER.info("Finished running FetchElementsFromHdfsTool job");
    } catch (final Exception e) {
        LOGGER.error("Failed to fetch elements from HDFS: {}", e.getMessage());
        throw new OperationException("Failed to fetch elements from HDFS", e);
    }
    if (AddElementsFromHdfsTool.SUCCESS_RESPONSE != response) {
        LOGGER.error("Failed to fetch elements from HDFS. Response code was {}", response);
        throw new OperationException("Failed to fetch elements from HDFS. Response code was: " + response);
    }
}
private void importElements(final AddElementsFromHdfs operation, final AccumuloStore store) throws OperationException {
    final ImportElementsToAccumuloTool importTool = new ImportElementsToAccumuloTool(operation.getOutputPath(), operation.getFailurePath(), store, operation.getOptions());
    final int response;
    try {
        LOGGER.info("Running import job");
        response = ToolRunner.run(importTool, new String[0]);
        LOGGER.info("Finished running import job");
    } catch (final Exception e) {
        LOGGER.error("Failed to import elements into Accumulo: {}", e.getMessage());
        throw new OperationException("Failed to import elements into Accumulo", e);
    }
    if (ImportElementsToAccumuloTool.SUCCESS_RESPONSE != response) {
        LOGGER.error("Failed to import elements into Accumulo. Response code was {}", response);
        throw new OperationException("Failed to import elements into Accumulo. Response code was: " + response);
    }
}
@Override
public int run(final String[] strings) throws Exception {
    jobFactory.prepareStore(store);
    LOGGER.info("Adding elements from HDFS");
    final List<Job> jobs = jobFactory.createJobs(operation, store);
    for (final Job job : jobs) {
        job.waitForCompletion(true);
        if (!job.isSuccessful()) {
            LOGGER.error("Error running job");
            throw new OperationException("Error running job");
        }
    }
    LOGGER.info("Finished adding elements from HDFS");
    return SUCCESS_RESPONSE;
}
@Override
public List<String> getSplits(final SplitStoreFromFile operation, final Context context, final Store store) throws OperationException {
    try {
        return super.getSplits(operation, context, store);
    } catch (final OperationException e) {
        // Ignore the error and try to load the splits from HDFS instead
    }
    try {
        final FileSystem fs = FileSystem.get(new Configuration());
        return IOUtils.readLines(fs.open(new Path(operation.getInputPath())));
    } catch (final IOException e) {
        throw new OperationException("Failed to load splits from hdfs file: " + operation.getInputPath(), e);
    }
}
public void writeElements(final Iterator<? extends Element> elements) throws OperationException {
    try {
        // Write elements
        _writeElements(elements);
    } catch (final IOException | OperationException e) {
        throw new OperationException("Exception writing elements to temporary directory: " + tempFilesDir, e);
    } finally {
        // Close the writers
        for (final Map<Integer, ParquetWriter<Element>> splitToWriter : groupSplitToWriter.values()) {
            for (final ParquetWriter<Element> writer : splitToWriter.values()) {
                try {
                    writer.close();
                } catch (final IOException ignored) {
                    // Swallow the exception so the remaining writers are still closed
                }
            }
        }
    }
}
public static synchronized String[] getSplits(final AccumuloStore store) throws OperationException {
    final Connector connector;
    try {
        connector = store.getConnection();
    } catch (final StoreException e) {
        throw new OperationException("Failed to create accumulo connection", e);
    }
    final String table = store.getTableName();
    try {
        return connector.tableOperations()
                .listSplits(table)
                .stream()
                .sorted()
                .map(Text::toString)
                .toArray(String[]::new);
    } catch (final TableNotFoundException | AccumuloSecurityException | AccumuloException e) {
        throw new OperationException("Failed to get accumulo split points from table " + table, e);
    }
}
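One plausible use of the method above is persisting the current split points so they can be replayed later, for example via SplitStoreFromFile; the output path and store variable below are assumptions:

    // Hypothetical usage: write the current split points to a file, one per line.
    final String[] splits = getSplits(accumuloStore);
    Files.write(Paths.get("/tmp/splits.txt"), Arrays.asList(splits), StandardCharsets.UTF_8);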
private void generateSplitsFromSampleData(final SampleDataForSplitPoints operation, final AccumuloStore store) throws OperationException {
    try {
        if (store.getTabletServers().size() < 2) {
            LOGGER.warn("There is only 1 tablet server so no split points will be calculated.");
            return;
        }
    } catch (final StoreException e) {
        throw new OperationException(e.getMessage(), e);
    }
    final SampleDataAndCreateSplitsFileTool sampleTool = new SampleDataAndCreateSplitsFileTool(new AccumuloSampleDataForSplitPointsJobFactory(), operation, store);
    try {
        ToolRunner.run(sampleTool, new String[0]);
    } catch (final Exception e) {
        throw new OperationException(e.getMessage(), e);
    }
    LOGGER.info("Finished calculating splits");
}
private void importFiles(final ImportAccumuloKeyValueFiles operation, final AccumuloStore store) throws OperationException {
    final ImportElementsToAccumuloTool importTool = new ImportElementsToAccumuloTool(operation.getInputPath(), operation.getFailurePath(), store, operation.getOptions());
    try {
        ToolRunner.run(importTool, new String[0]);
    } catch (final Exception e) {
        throw new OperationException(e.getMessage(), e);
    }
}
private void doOperation(final SplitStoreFromIterable<String> operation, final AccumuloStore store) throws OperationException {
    if (null == operation.getInput()) {
        throw new OperationException("Operation input is required.");
    }
    final SortedSet<Text> splits = new TreeSet<>();
    for (final String split : operation.getInput()) {
        splits.add(new Text(Base64.decodeBase64(split)));
    }
    try {
        store.getConnection().tableOperations().addSplits(store.getTableName(), splits);
        LOGGER.info("Added {} splits to table {}", splits.size(), store.getTableName());
    } catch (final TableNotFoundException | AccumuloException | AccumuloSecurityException | StoreException e) {
        LOGGER.error("Failed to add {} split points to table {}", splits.size(), store.getTableName());
        // Throw the declared checked exception rather than an unchecked RuntimeException
        throw new OperationException("Failed to add split points: " + e.getMessage(), e);
    }
}
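Since the handler above Base64-decodes each input string, callers would need to Base64-encode the raw split bytes first; a sketch, where rawSplits is a hypothetical source of byte arrays:

    // Sketch of preparing input for SplitStoreFromIterable<String>.
    // encodeBase64String is from org.apache.commons.codec.binary.Base64,
    // matching the decodeBase64 call in the handler above.
    final List<String> encodedSplits = new ArrayList<>();
    for (final byte[] rawSplit : rawSplits) {
        encodedSplits.add(Base64.encodeBase64String(rawSplit));
    }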
} catch (final IOException e) { LOGGER.error("Exception reading results file and outputting split points: {}", e.getMessage()); throw new OperationException(e.getMessage(), e);
public <O> O executeOpChainViaUrl(final OperationChain<O> opChain, final Context context) throws OperationException {
    final String opChainJson;
    try {
        opChainJson = new String(JSONSerialiser.serialise(opChain), CommonConstants.UTF_8);
    } catch (final UnsupportedEncodingException | SerialisationException e) {
        throw new OperationException("Unable to serialise operation chain into JSON.", e);
    }
    final URL url = getProperties().getGafferUrl("graph/operations/execute");
    try {
        return doPost(url, opChainJson, opChain.getOutputTypeReference(), context);
    } catch (final StoreException e) {
        throw new OperationException(e.getMessage(), e);
    }
}
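A hypothetical call into the method above, forwarding a chain to a remote Gaffer REST API; proxyStore and context are assumed to exist:

    // Usage sketch: GetAllElements typed through OperationChain.Builder.
    final OperationChain<CloseableIterable<? extends Element>> chain =
            new OperationChain.Builder()
                    .first(new GetAllElements())
                    .build();
    final CloseableIterable<? extends Element> results = proxyStore.executeOpChainViaUrl(chain, context);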
} catch (final IOException e) { LOGGER.error("Failed to create Hadoop job: {}", e.getMessage()); throw new OperationException("Failed to create the Hadoop job: " + e.getMessage(), e); } catch (final IOException | InterruptedException | ClassNotFoundException e) { LOGGER.error("Exception running job: {}", e.getMessage()); throw new OperationException("Error while waiting for job to complete: " + e.getMessage(), e); if (!job.isSuccessful()) { LOGGER.error("Job was not successful (job name is {})", job.getJobName()); throw new OperationException("Error running job"); throw new OperationException("Error running job" + e.getMessage(), e); } catch (final IOException e) { LOGGER.error("Failed to get counter org.apache.hadoop.mapred.TaskCounter.REDUCE_OUTPUT_RECORDS from job: {}", e.getMessage()); throw new OperationException("Failed to get counter: " + TaskCounter.REDUCE_OUTPUT_RECORDS, e); } catch (final IOException e) { LOGGER.error("Exception getting filesystem: {}", e.getMessage()); throw new OperationException("Failed to get filesystem from configuration: " + e.getMessage(), e); } catch (final IOException e) { LOGGER.error("Failed to delete the results file {}", resultsFile); throw new OperationException("Failed to delete the results file: " + e.getMessage(), e);