/**
 * Validates the {@link AddElementsFromHdfs} operation before execution.
 * <p>
 * Map-task counts cannot be controlled for this import, so {@code minMapTasks}
 * and {@code maxMapTasks} only produce warnings. Reducer settings are checked
 * for consistency, and {@code workingPath} is mandatory.
 *
 * @param operation the operation to validate
 * @throws IllegalArgumentException if numReduceTasks is combined with
 *         min/maxReduceTasks, if minReduceTasks exceeds maxReduceTasks,
 *         or if workingPath is not set
 */
private void validateOperation(final AddElementsFromHdfs operation) {
    // Map task counts are ignored by this handler - warn rather than fail.
    if (null != operation.getMinMapTasks()) {
        LOGGER.warn("minMapTasks field will be ignored");
    }
    if (null != operation.getMaxMapTasks()) {
        LOGGER.warn("maxMapTasks field will be ignored");
    }
    // An explicit reducer count is mutually exclusive with a min/max range.
    if (null != operation.getNumReduceTasks()
            && (null != operation.getMinReduceTasks() || null != operation.getMaxReduceTasks())) {
        throw new IllegalArgumentException("minReduceTasks and/or maxReduceTasks should not be set if numReduceTasks is");
    }
    if (null != operation.getMinReduceTasks() && null != operation.getMaxReduceTasks()) {
        LOGGER.warn("Logic for the minimum may result in more reducers than the maximum set");
        if (operation.getMinReduceTasks() > operation.getMaxReduceTasks()) {
            throw new IllegalArgumentException("Minimum number of reducers must be less than the maximum number of reducers");
        }
    }
    // NOTE: splitsFilePath is intentionally NOT required here. doOperation derives a
    // default splits file location under workingPath when it is absent, so rejecting
    // a null splitsFilePath would make that fallback unreachable.
    if (null == operation.getWorkingPath()) {
        throw new IllegalArgumentException("workingPath is required");
    }
}
/**
 * Adds elements to the given Accumulo store from files in HDFS.
 * <p>
 * Steps: validate the operation, default the splits file location if not
 * supplied, verify the HDFS directories, optionally sample/split the table,
 * fetch the elements, and finally import them unless the skip-import option
 * is set to {@code TRUE} (case-insensitive).
 *
 * @param operation the add-elements-from-HDFS operation to execute
 * @param context   the operation context (supplies the job id used for the
 *                  default splits path)
 * @param store     the Accumulo store to add the elements to
 * @throws OperationException if a filesystem error occurs while checking the
 *                            HDFS directories
 */
public void doOperation(final AddElementsFromHdfs operation, final Context context, final AccumuloStore store) throws OperationException {
    validateOperation(operation);

    // Default the splits file to a job-scoped path under the working directory.
    if (null == operation.getSplitsFilePath()) {
        final String splitsFilePath = getPathWithSlashSuffix(operation.getWorkingPath()) + context.getJobId() + "/splits";
        LOGGER.info("Using working directory for splits files: {}", splitsFilePath);
        operation.setSplitsFilePath(splitsFilePath);
    }

    try {
        checkHdfsDirectories(operation, store);
    } catch (final IOException e) {
        // Chain the cause so the underlying stack trace is not lost.
        throw new OperationException("Operation failed due to filesystem error: " + e.getMessage(), e);
    }

    if (!operation.isUseProvidedSplits() && needsSplitting(store)) {
        sampleAndSplit(operation, context, store);
    }

    fetchElements(operation, store);

    // equalsIgnoreCase is null-safe, so an unset option also triggers the import.
    final String skipImport = operation.getOption(AccumuloStoreConstants.ADD_ELEMENTS_FROM_HDFS_SKIP_IMPORT);
    if (!"TRUE".equalsIgnoreCase(skipImport)) {
        importElements(operation, store);
    } else {
        LOGGER.info("Skipping import as {} was {}", AccumuloStoreConstants.ADD_ELEMENTS_FROM_HDFS_SKIP_IMPORT, skipImport);
    }
}
final String workingPath = operation.getWorkingPath(); if (null == workingPath) { throw new IllegalArgumentException("Prior to adding the data, the table needs to be split. To do this the workingPath must be set to a temporary directory");