/**
 * Checks whether a worker thread pool has been registered for the given
 * volume.
 *
 * @param volume the root of a volume
 * @return true if a thread pool exists for the volume, false otherwise
 * @throws RuntimeException if the service has already been shut down
 */
synchronized boolean queryVolume(FsVolumeImpl volume) {
  String storageId = volume.getStorageID();
  if (executors == null) {
    throw new RuntimeException(
        "AsyncLazyPersistService is already shutdown");
  }
  return executors.get(storageId) != null;
}
/**
 * Submits a task to the thread pool that serves the given volume; the task
 * will run at some point in the future.
 *
 * @param volume the volume whose executor should run the task
 * @param task the work to schedule
 * @throws RuntimeException if the service is shut down, the volume is null,
 *         or no executor is registered for the volume
 */
synchronized void execute(FsVolumeImpl volume, Runnable task) {
  if (executors == null) {
    throw new RuntimeException("AsyncDiskService is already shutdown");
  }
  if (volume == null) {
    throw new RuntimeException("A null volume does not have a executor");
  }
  ThreadPoolExecutor pool = executors.get(volume.getStorageID());
  if (pool == null) {
    throw new RuntimeException("Cannot find volume " + volume
        + " for execution of task " + task);
  }
  pool.execute(task);
}
/**
 * Stops AsyncLazyPersistService for a volume: shuts down its executor and
 * unregisters it.
 *
 * @param volume the root of the volume.
 * @throws RuntimeException if the service is already shut down or no
 *         executor is registered for the volume's storage id
 */
synchronized void removeVolume(FsVolumeImpl volume) {
  String storageId = volume.getStorageID();
  if (executors == null) {
    // Fixed: message previously said "AsyncDiskService", which is a
    // different service; keep it consistent with queryVolume/addVolume.
    throw new RuntimeException(
        "AsyncLazyPersistService is already shutdown");
  }
  ThreadPoolExecutor executor = executors.get(storageId);
  if (executor == null) {
    throw new RuntimeException("Can not find volume with storage id "
        + storageId + " to remove.");
  } else {
    // Allow in-flight tasks to finish, then forget the pool.
    executor.shutdown();
    executors.remove(storageId);
  }
}
/**
 * Starts AsyncLazyPersistService for a new volume by registering a
 * dedicated executor keyed on the volume's storage id.
 *
 * @param volume the root of the new data volume.
 * @throws RuntimeException if the service is shut down or the volume is
 *         already registered
 */
synchronized void addVolume(FsVolumeImpl volume) {
  String storageId = volume.getStorageID();
  if (executors == null) {
    throw new RuntimeException("AsyncLazyPersistService is already shutdown");
  }
  if (executors.get(storageId) != null) {
    throw new RuntimeException("Volume " + volume + " is already existed.");
  }
  addExecutorForVolume(storageId);
}
private void addExecutorForVolume(final FsVolumeImpl volume) { ThreadFactory threadFactory = new ThreadFactory() { int counter = 0; @Override public Thread newThread(Runnable r) { int thisIndex; synchronized (this) { thisIndex = counter++; } Thread t = new Thread(threadGroup, r); t.setName("Async disk worker #" + thisIndex + " for volume " + volume); return t; } }; ThreadPoolExecutor executor = new ThreadPoolExecutor( CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory); // This can reduce the number of running threads executor.allowCoreThreadTimeOut(true); executors.put(volume.getStorageID(), executor); }
/**
 * Starts AsyncDiskService for a new volume by creating a dedicated
 * executor for it.
 *
 * @param volume the root of the new data volume.
 * @throws RuntimeException if the service is shut down, the volume is
 *         null, or the volume is already registered
 */
synchronized void addVolume(FsVolumeImpl volume) {
  if (executors == null) {
    throw new RuntimeException("AsyncDiskService is already shutdown");
  }
  if (volume == null) {
    throw new RuntimeException("Attempt to add a null volume");
  }
  if (executors.get(volume.getStorageID()) != null) {
    throw new RuntimeException("Volume " + volume + " is already existed.");
  }
  addExecutorForVolume(volume);
}
/**
 * Asynchronously lazy persist the block from the RamDisk to Disk.
 *
 * @param bpId block pool id of the replica
 * @param blockId id of the block to persist
 * @param genStamp generation stamp of the block
 * @param creationTime creation time of the replica
 * @param replica the in-memory replica to persist
 * @param target reference to the destination volume; released here if the
 *        task cannot be scheduled, otherwise ownership passes to the task
 * @throws IOException declared for callers; scheduling itself is async
 */
void submitLazyPersistTask(String bpId, long blockId, long genStamp,
    long creationTime, ReplicaInfo replica, FsVolumeReference target)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("LazyWriter schedule async task to persist RamDisk block pool id: "
        + bpId + " block id: " + blockId);
  }

  ReplicaLazyPersistTask lazyPersistTask = new ReplicaLazyPersistTask(
      bpId, blockId, genStamp, creationTime, replica, target);

  FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
  try {
    execute(volume.getStorageID(), lazyPersistTask);
  } catch (RuntimeException re) {
    // execute() throws when the service is shut down or the volume is
    // unknown. The task never ran, so release the volume reference here
    // to avoid leaking it.
    IOUtils.cleanup(null, target);
    throw re;
  }
}
/** * Dynamically add new volumes to the existing volumes that this DN manages. * * @param ref a reference to the new FsVolumeImpl instance. */ void addVolume(FsVolumeReference ref) { FsVolumeImpl volume = (FsVolumeImpl) ref.getVolume(); volumes.add(volume); if (blockScanner != null) { blockScanner.addVolumeScanner(ref); } else { // If the volume is not put into a volume scanner, it does not need to // hold the reference. IOUtils.cleanup(null, ref); } // If the volume is used to replace a failed volume, it needs to reset the // volume failure info for this volume. removeVolumeFailureInfo(volume.getStorageLocation()); FsDatasetImpl.LOG.info("Added new volume: " + volume.getStorageID()); }
builders.get(v.getStorageID()).build());
@Override public void run() { final long blockLength = replicaToDelete.getBlockDataLength(); final long metaLength = replicaToDelete.getMetadataLength(); boolean result; result = (trashDirectory == null) ? deleteFiles() : moveFiles(); if (!result) { LOG.warn("Unexpected error trying to " + (trashDirectory == null ? "delete" : "move") + " block " + block.getBlockPoolId() + " " + block.getLocalBlock() + " at file " + replicaToDelete.getBlockURI() + ". Ignored."); } else { if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){ datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID()); } volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength); volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength); LOG.info("Deleted " + block.getBlockPoolId() + " " + block.getLocalBlock() + " URI " + replicaToDelete.getBlockURI()); } updateDeletedBlockId(block); IOUtils.cleanup(null, volumeRef); } }
@Override public void run() { long dfsBytes = blockFile.length() + metaFile.length(); boolean result; result = (trashDirectory == null) ? deleteFiles() : moveFiles(); if (!result) { LOG.warn("Unexpected error trying to " + (trashDirectory == null ? "delete" : "move") + " block " + block.getBlockPoolId() + " " + block.getLocalBlock() + " at file " + blockFile + ". Ignored."); } else { if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){ datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID()); } volume.decDfsUsed(block.getBlockPoolId(), dfsBytes); LOG.info("Deleted " + block.getBlockPoolId() + " " + block.getLocalBlock() + " file " + blockFile); } updateDeletedBlockId(block); IOUtils.cleanup(null, volumeRef); } }
builders.get(v.getStorageID()).build());
builders.get(v.getStorageID()).build());
/**
 * Stores the information related to a namenode in the cluster.
 */
public static class NameNodeInfo {
  final NameNode nameNode;
  final Configuration conf;
  final String nameserviceId;
  final String nnId;
  // Mutable: may be changed between restarts via setStartOpt().
  StartupOption startOpt;

  /**
   * @param nn the running namenode instance
   * @param nameserviceId id of the nameservice this NN belongs to
   * @param nnId id of this namenode within the nameservice
   * @param startOpt the startup option the NN was launched with
   * @param conf the configuration used to start the NN
   */
  NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
      StartupOption startOpt, Configuration conf) {
    this.nameNode = nn;
    this.nameserviceId = nameserviceId;
    this.nnId = nnId;
    this.startOpt = startOpt;
    this.conf = conf;
  }

  /** Overrides the startup option used for subsequent restarts. */
  public void setStartOpt(StartupOption startOpt) {
    this.startOpt = startOpt;
  }
}
/**
 * Marks every volume of the given storage type on the datanode as full by
 * forcing its capacity to zero (test helper).
 */
private void setVolumeFull(DataNode dn, StorageType type) {
  List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
  for (FsVolumeSpi spi : volumes) {
    FsVolumeImpl fsVolume = (FsVolumeImpl) spi;
    if (fsVolume.getStorageType() != type) {
      continue;
    }
    LOG.info("setCapacity to 0 for [" + fsVolume.getStorageType() + "]"
        + fsVolume.getStorageID());
    fsVolume.setCapacityForTesting(0);
  }
}
@Override public void run() { long dfsBytes = blockFile.length() + metaFile.length(); boolean result; result = (trashDirectory == null) ? deleteFiles() : moveFiles(); if (!result) { LOG.warn("Unexpected error trying to " + (trashDirectory == null ? "delete" : "move") + " block " + block.getBlockPoolId() + " " + block.getLocalBlock() + " at file " + blockFile + ". Ignored."); } else { if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){ datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID()); } volume.decDfsUsed(block.getBlockPoolId(), dfsBytes); LOG.info("Deleted " + block.getBlockPoolId() + " " + block.getLocalBlock() + " file " + blockFile); } updateDeletedBlockId(block); IOUtils.cleanup(null, volumeRef); } }