@Override
public InputSplit[] getSplits(final JobConf conf, int ignored) throws IOException {
  final String domainName = DomainBuilderProperties.getDomainName(conf);
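  // Fetch the domain and the version to compact through the Coordinator. The callback
  // assigns to fields of the enclosing class (domain, domainVersionToCompact): an
  // anonymous inner class cannot assign to local variables of this method.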
  RunWithCoordinator.run(DomainBuilderProperties.getConfigurator(conf), new RunnableWithCoordinator() {
    @Override
    public void run(Coordinator coordinator) throws IOException {
      domain = DomainBuilderProperties.getDomain(coordinator, domainName);
      domainVersionToCompact =
          domain.getVersion(DomainCompactorProperties.getVersionNumberToCompact(domainName, conf));
    }
  });
  final int domainNumParts = domain.getNumParts();
  final StorageEngine storageEngine = domain.getStorageEngine();
  final InputSplit[] splits = new InputSplit[domainNumParts];
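  // Create one input split per domain partition.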
  for (int partition = 0; partition < domainNumParts; ++partition) {
    String[] locations = new String[0];
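    // Locality hints are only available for incremental storage engines, whose
    // update planner can enumerate the remote partition files this compaction will read.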
    if (storageEngine instanceof IncrementalStorageEngine) {
      IncrementalUpdatePlanner updatePlanner =
          ((IncrementalStorageEngine) storageEngine).getUpdatePlanner(domain);
      IncrementalUpdatePlan updatePlan = updatePlanner.computeUpdatePlan(domainVersionToCompact);
      List<String> paths = updatePlanner.getRemotePartitionFilePaths(updatePlan,
          storageEngine.getPartitionRemoteFileOps(StorageEngine.RemoteLocation.DOMAIN_BUILDER, partition));
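      // Prefer hosts that already store these files locally.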
      locations = LocalityHelper.getHostsSortedByLocality(paths, conf);
    }
    splits[partition] = new HadoopDomainCompactorInputSplit(domainName, partition, locations);
  }
  return splits;
}