/** Clear all per-iteration state so this dispatcher can be reused for the next round. */
void reset(Configuration conf) {
  // Rebuild the topology from the (possibly updated) configuration.
  cluster = NetworkTopology.getInstance(conf);
  storageGroupMap.clear();
  sources.clear();

  moverThreadAllocator.reset();
  // Shut down every target's move executor before discarding the targets.
  for (StorageGroup target : targets) {
    target.getDDatanode().shutdownMoveExecutor();
  }
  targets.clear();

  // Drop global block records except those still tracked as already moved.
  globalBlocks.removeAllButRetain(movedBlocks);
  movedBlocks.cleanup();
}
final StorageType[] storageTypes = blkLocs.getStorageTypes(); for (int i = 0; i < datanodeUuids.length; i++) { final StorageGroup g = storageGroupMap.get( datanodeUuids[i], storageTypes[i]); if (g != null) { // not unknown
final DBlock block = globalBlocks.get(blk.getBlock()); synchronized (block) { block.clearLocations(); final StorageType[] storageTypes = blk.getStorageTypes(); for (int i = 0; i < datanodeUuids.length; i++) { final StorageGroup g = storageGroupMap.get( datanodeUuids[i], storageTypes[i]); if (g != null) { // not unknown
final DBlock block = globalBlocks.get(blk.getBlock()); synchronized (block) { block.clearLocations(); final StorageType[] storageTypes = blk.getStorageTypes(); for (int i = 0; i < datanodeUuids.length; i++) { final StorageGroup g = storageGroupMap.get( datanodeUuids[i], storageTypes[i]); if (g != null) { // not unknown
/** * @return whether there is still remaining migration work for the next * round */ private boolean processNamespace() throws IOException { getSnapshottableDirs(); boolean hasRemaining = false; for (Path target : targetPaths) { hasRemaining |= processPath(target.toUri().getPath()); } // wait for pending move to finish and retry the failed migration boolean hasFailed = Dispatcher.waitForMoveCompletion(storages.targets .values()); if (hasFailed) { if (retryCount.get() == retryMaxAttempts) { throw new IOException("Failed to move some block's after " + retryMaxAttempts + " retries."); } else { retryCount.incrementAndGet(); } } else { // Reset retry count if no failure. retryCount.set(0); } hasRemaining |= hasFailed; return hasRemaining; }
/** * @return whether there is still remaining migration work for the next * round */ private boolean processNamespace() throws IOException { getSnapshottableDirs(); boolean hasRemaining = false; for (Path target : targetPaths) { hasRemaining |= processPath(target.toUri().getPath()); } // wait for pending move to finish and retry the failed migration boolean hasFailed = Dispatcher.waitForMoveCompletion(storages.targets .values()); if (hasFailed) { if (retryCount.get() == retryMaxAttempts) { throw new IOException("Failed to move some block's after " + retryMaxAttempts + " retries."); } else { retryCount.incrementAndGet(); } } else { // Reset retry count if no failure. retryCount.set(0); } hasRemaining |= hasFailed; return hasRemaining; }
/** Looks up the group registered under the given datanode UUID and storage type. */
public G get(String datanodeUuid, StorageType storageType) {
  final String key = toKey(datanodeUuid, storageType);
  return map.get(key);
}
/** Resolves the target storage group for the given datanode UUID and storage type, if registered. */
private StorageGroup getTarget(String uuid, StorageType storageType) {
  final StorageGroup target = targets.get(uuid, storageType);
  return target;
}
/** Convenience lookup: resolves the storage group of an {@code MLocation} in the given map. */
private static <G extends StorageGroup> G get(StorageGroupMap<G> map,
    MLocation ml) {
  final String uuid = ml.datanode.getDatanodeUuid();
  return map.get(uuid, ml.storageType);
}
/** Convenience lookup: resolves the storage group of an {@code MLocation} in the given map. */
private static <G extends StorageGroup> G get(StorageGroupMap<G> map,
    MLocation ml) {
  final String uuid = ml.datanode.getDatanodeUuid();
  return map.get(uuid, ml.storageType);
}
/** Resolves the target storage group for the given datanode UUID and storage type, if registered. */
private StorageGroup getTarget(String uuid, StorageType storageType) {
  final StorageGroup target = targets.get(uuid, storageType);
  return target;
}
/** Clear all per-iteration state so this dispatcher can be reused for the next round. */
void reset(Configuration conf) {
  // Rebuild the topology from the (possibly updated) configuration.
  cluster = NetworkTopology.getInstance(conf);
  storageGroupMap.clear();
  sources.clear();

  moverThreadAllocator.reset();
  // Shut down every target's move executor before discarding the targets.
  for (StorageGroup target : targets) {
    target.getDDatanode().shutdownMoveExecutor();
  }
  targets.clear();

  // Drop global block records except those still tracked as already moved.
  globalBlocks.removeAllButRetain(movedBlocks);
  movedBlocks.cleanup();
}
/**
 * Registers a storage group, keyed by its datanode UUID and storage type.
 *
 * @param g the group to register; must not collide with an existing entry
 * @throws IllegalStateException if a group with the same key was already put
 */
public void put(G g) {
  final String key = toKey(g.getDatanodeInfo().getDatanodeUuid(),
      g.storageType);
  final StorageGroup existing = map.put(key, g);
  // Fail loudly on duplicate registration, naming the offending key so the
  // failure is diagnosable (bare checkState gave no context).
  Preconditions.checkState(existing == null,
      "StorageGroup %s was already registered", key);
}
/** Clear all per-iteration state so this dispatcher can be reused for the next round. */
void reset(Configuration conf) {
  // Rebuild the topology from the (possibly updated) configuration.
  cluster = NetworkTopology.getInstance(conf);
  storageGroupMap.clear();
  sources.clear();

  moverThreadAllocator.reset();
  // Shut down every target's move executor before discarding the targets.
  for (StorageGroup target : targets) {
    target.getDDatanode().shutdownMoveExecutor();
  }
  targets.clear();

  // Drop global block records except those still tracked as already moved.
  globalBlocks.removeAllButRetain(movedBlocks);
  movedBlocks.cleanup();
}
/**
 * Sums the scheduled byte counts of all sources, after sanity-checking
 * that every source and target is accounted for in the storage-group map.
 */
long bytesToMove() {
  Preconditions.checkState(
      storageGroupMap.size() >= sources.size() + targets.size(),
      "Mismatched number of storage groups (" + storageGroupMap.size()
          + " < " + sources.size() + " sources + " + targets.size()
          + " targets)");
  long total = 0L;
  for (Source source : sources) {
    total += source.getScheduledSize();
  }
  return total;
}
/** Looks up the group registered under the given datanode UUID and storage type. */
public G get(String datanodeUuid, StorageType storageType) {
  final String key = toKey(datanodeUuid, storageType);
  return map.get(key);
}