/**
 * Deletes old remote versions for every domain in the given collection.
 *
 * <p>A domain is skipped (with an info log) when its storage engine provides no
 * {@link RemoteDomainCleaner} or no {@link RemoteDomainVersionDeleter} for the
 * requested location.
 *
 * @param domains        domains whose old remote versions should be removed
 * @param deleteMetadata whether version metadata is deleted along with the data
 * @param dataLocation   remote location whose version deleter is used
 * @throws IOException if deleting old versions fails
 */
public static void cleanDomains(Collection<Domain> domains, boolean deleteMetadata, StorageEngine.RemoteLocation dataLocation) throws IOException {
  for (Domain domain : domains) {
    StorageEngine engine = domain.getStorageEngine();
    RemoteDomainCleaner remoteCleaner = engine.getRemoteDomainCleaner();
    if (remoteCleaner == null) {
      LOG.info("Failed to clean Domain " + domain.getName() + ". No Remote Domain Cleaner is configured.");
    } else {
      RemoteDomainVersionDeleter versionDeleter = engine.getRemoteDomainVersionDeleter(dataLocation);
      if (versionDeleter == null) {
        LOG.info("Failed to clean Domain " + domain.getName() + ". No Remote Domain Version Deleter is configured.");
      } else {
        LOG.info("Cleaning Domain " + domain.getName());
        remoteCleaner.deleteOldVersions(versionDeleter, deleteMetadata);
      }
    }
  }
}
/**
 * Returns the writer used during compaction: delegates to the storage
 * engine's {@code getCompactorWriter} for the given domain version,
 * remote file ops, and partition.
 *
 * @throws IOException if the storage engine cannot create the writer
 */
@Override protected Writer getWriter(StorageEngine storageEngine, DomainVersion domainVersion, PartitionRemoteFileOps partitionRemoteFileOps, int partitionNumber) throws IOException { return storageEngine.getCompactorWriter(domainVersion, partitionRemoteFileOps, partitionNumber); }
// Closing brace of the enclosing class (its declaration is outside this chunk).
}
public KeyAndPartitionWritableComparable(StorageEngine storageEngine, Partitioner partitioner, int numPartitions, BytesWritable key) { this.keyAndPartitionWritable = new KeyAndPartitionWritable(partitioner, numPartitions, key); this.comparableKey = BytesUtils.byteBufferDeepCopy(storageEngine.getComparableKey(ByteBuffer.wrap(key.getBytes(), 0, key.getLength()))); }
// Resolve the storage engine for this host domain and compute the on-disk
// directory assignment covering all of its partitions.
StorageEngine storageEngine = hostDomain.getDomain().getStorageEngine();
DiskPartitionAssignment assignments = storageEngine.getDataDirectoryPerPartition(configurator, getPartitionNumbers(hostDomain.getPartitions()));
// Walk every file the engine reports for this partition at the given version.
// NOTE(review): loop body continues beyond this chunk.
for (String filePath : storageEngine.getFiles(assignments, versionNumber, hostDomainPartition.getPartitionNumber())) { File file = new File(filePath);
/**
 * Command-line entry point: marks the given domain version defunct and then
 * deletes its remote data from the domain builder location.
 *
 * <p>Arguments: configuration path, domain name, domain version number.
 *
 * @throws IOException                   on coordinator or deletion failures
 * @throws InvalidConfigurationException if the YAML configuration is invalid
 */
public static void main(String[] args) throws IOException, InvalidConfigurationException {
  CommandLineChecker.check(args, new String[]{"configuration", "domain name", "domain version number"}, RemoteDomainVersionDeletionHelper.class);
  String configurationPath = args[0];
  String domainName = args[1];
  Integer versionNumber = Integer.parseInt(args[2]);
  Coordinator coordinator = new YamlCoordinatorConfigurator(configurationPath).createCoordinator();
  // Fail fast when the domain or the requested version does not exist.
  Domain domain = coordinator.getDomain(domainName);
  if (domain == null) {
    throw new RuntimeException("Given domain was not found: " + domainName);
  }
  DomainVersion domainVersion = domain.getVersion(versionNumber);
  if (domainVersion == null) {
    throw new RuntimeException("Given version was not found: " + domainName + " version " + versionNumber);
  }
  LOG.info("Deleting remote data for domain " + domainName + " version " + versionNumber);
  // Mark defunct first so the version is no longer served, then remove its data.
  domainVersion.setDefunct(true);
  domain.getStorageEngine().getRemoteDomainVersionDeleter(StorageEngine.RemoteLocation.DOMAIN_BUILDER).deleteVersion(versionNumber);
}
// Closing brace of the enclosing class (its declaration is outside this chunk).
}
// Map each partition number to its local data directory for this engine.
DiskPartitionAssignment assignment = engine.getDataDirectoryPerPartition(configurator, partitionNumbers);
// Open a reader for this partition using the engine-supplied reader configurator.
reader = engine.getReader(configurator.getReaderConfigurator(numTotalPartitions), partition.getPartitionNumber(), assignment);
// NOTE(review): this brace closes a try block whose start is outside this chunk.
} catch (IOException | IllegalArgumentException e) {
private void setNewPartitionWriter(int partitionNumber) throws IOException { // First, close current writer closeCurrentWriterIfNeeded(); LOG.info("Setting up new writer for partition " + partitionNumber); // Check for existing partitions if (writtenPartitions.contains(partitionNumber)) { throw new RuntimeException("Partition " + partitionNumber + " has already been written."); } // Set up new writer writerOutputPath = new Path(outputPath, "partition_" + partitionNumber + "_" + UUID.randomUUID().toString()); numRecordsWritten = 0; writerPartition = partitionNumber; writtenPartitions.add(partitionNumber); writer = getWriter(storageEngine, domainVersion, storageEngine.getPartitionRemoteFileOpsFactory(StorageEngine.RemoteLocation.DOMAIN_BUILDER).getPartitionRemoteFileOps(writerOutputPath.toString(), partitionNumber), partitionNumber); }
@Override public void map(Text domainName, IntWritable partitionNumber, OutputCollector<KeyAndPartitionWritable, ValueWritable> outputCollector, Reporter reporter) throws IOException { LOG.info("Compacting Domain " + domainName.toString() + " Version " + domainVersionToCompact.getVersionNumber() + " Partition " + partitionNumber.get() + " in " + localTmpOutput.getAbsolutePath()); // Get compacting updater DiskPartitionAssignment assignment = new DiskPartitionAssignment(Collections.singletonMap( partitionNumber.get(), localTmpOutput.getAbsolutePath() )); Compactor compactor = storageEngine.getCompactor(assignment, partitionNumber.get()); if (compactor == null) { throw new RuntimeException("Failed to load compacting updater for domain " + domainName + " with storage engine: " + storageEngine); } // Close coordinator when possible compactor.closeCoordinatorOpportunistically(coordinator); // Perform compaction compactor.compact(domainVersionToCompact, new OutputCollectorWriter(reporter, partitionNumber, outputCollector)); }
private ArrayList<PartitionUpdateTask> buildPartitionUpdateTasks( PartitionUpdateTaskStatisticsAggregator partitionUpdateTaskStatisticsAggregator, List<Throwable> encounteredThrowables) throws IOException { ArrayList<PartitionUpdateTask> partitionUpdateTasks = new ArrayList<PartitionUpdateTask>(); for (HostDomain hostDomain : host.getAssignedDomains()) { StorageEngine engine = hostDomain.getDomain().getStorageEngine(); DiskPartitionAssignment assignments = engine.getDataDirectoryPerPartition(configurator, getPartitionNumbers(hostDomain.getPartitions())); for (HostDomainPartition partition : hostDomain.getPartitions()) { partitionUpdateTasks.add( new PartitionUpdateTask( hostDomain, partition, partitionUpdateTaskStatisticsAggregator, encounteredThrowables, assignments)); } } // Sort update tasks per partition id, so that we update domains concurrently but in order of partition number Collections.sort(partitionUpdateTasks); return partitionUpdateTasks; }
/**
 * Deletes a partition's local data via the storage engine's deleter and then
 * removes the partition from the host domain.
 *
 * @param hostDomain domain the partition belongs to
 * @param partition  partition to delete
 * @throws IOException if deletion fails
 */
private void deletePartition(HostDomain hostDomain, HostDomainPartition partition) throws IOException {
  int partitionNumber = partition.getPartitionNumber();
  LOG.info("Deleting Domain " + hostDomain.getDomain().getName() + " partition " + partitionNumber);
  Deleter partitionDeleter = hostDomain.getDomain().getStorageEngine().getDeleter(assignment, partitionNumber);
  partitionDeleter.delete();
  hostDomain.removePartition(partitionNumber);
}
/**
 * Creates one input split per partition of the domain version being compacted.
 *
 * <p>Side effect: the anonymous {@code RunnableWithCoordinator} populates the
 * outer {@code domain} and {@code domainVersionToCompact} fields, which the
 * rest of this method (and presumably the mapper) reads.
 *
 * @param conf    job configuration carrying domain name and version to compact
 * @param ignored requested split count hint; not used — split count is fixed
 *                to the domain's partition count
 * @throws IOException if coordinator access or update planning fails
 */
@Override public InputSplit[] getSplits(final JobConf conf, int ignored) throws IOException { final String domainName = DomainBuilderProperties.getDomainName(conf); RunWithCoordinator.run(DomainBuilderProperties.getConfigurator(conf), new RunnableWithCoordinator() { @Override public void run(Coordinator coordinator) throws IOException { domain = DomainBuilderProperties.getDomain(coordinator, domainName); domainVersionToCompact = domain.getVersion(DomainCompactorProperties.getVersionNumberToCompact(domainName, conf)); } }); final int domainNumParts = domain.getNumParts(); final StorageEngine storageEngine = domain.getStorageEngine(); final InputSplit[] splits = new InputSplit[domainNumParts]; // Create one split per partition, attaching data-locality hints when the
// engine can report which remote files the compaction would read.
for (int partition = 0; partition < domainNumParts; ++partition) { // Compute remote partition file paths for this split if possible
String[] locations = new String[]{}; if (storageEngine instanceof IncrementalStorageEngine) { IncrementalUpdatePlanner updatePlanner = ((IncrementalStorageEngine)storageEngine).getUpdatePlanner(domain); IncrementalUpdatePlan updatePlan = updatePlanner.computeUpdatePlan(domainVersionToCompact); List<String> paths = updatePlanner.getRemotePartitionFilePaths(updatePlan, storageEngine.getPartitionRemoteFileOps(StorageEngine.RemoteLocation.DOMAIN_BUILDER, partition)); // Hosts holding more of these files sort first — better split placement.
locations = LocalityHelper.getHostsSortedByLocality(paths, conf); } splits[partition] = new HadoopDomainCompactorInputSplit(domainName, partition, locations); } return splits; }
public void operate(FlowProcess flowProcess, FunctionCall<AddPartitionAndComparableKeyFields> call) { // Load configuration lazily loadConfiguration(flowProcess); // Compute partition and comparable key TupleEntry tupleEntry = call.getArguments(); BytesWritable key = (BytesWritable)tupleEntry.getObject(0); ByteBuffer keyByteBuffer = ByteBuffer.wrap(key.getBytes(), 0, key.getLength()); PartitionIntWritable partition = new PartitionIntWritable(partitioner.partition(keyByteBuffer, domainNumParts)); ByteBuffer comparableKey = storageEngine.getComparableKey(keyByteBuffer); byte[] comparableKeyBuffer = new byte[comparableKey.remaining()]; System.arraycopy(comparableKey.array(), comparableKey.arrayOffset() + comparableKey.position(), comparableKeyBuffer, 0, comparableKey.remaining()); BytesWritable comparableKeyBytesWritable = new BytesWritable(comparableKeyBuffer); // Add partition and comparable key fields call.getOutputCollector().add(new Tuple(partition, comparableKeyBytesWritable)); }