/**
 * Extracts the storage ID of each entry in the given storage array.
 *
 * @param storages storages to read IDs from; may be {@code null}
 * @return the storage IDs in the same order, or {@code null} if the
 *         input array was {@code null}
 */
public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
  if (storages == null) {
    return null;
  }
  final String[] ids = new String[storages.length];
  int idx = 0;
  for (DatanodeStorageInfo info : storages) {
    ids[idx++] = info.getStorageID();
  }
  return ids;
}
/**
 * Creates and registers a {@link ProvidedDatanodeStorageInfo} for the
 * given storage on this datanode descriptor.
 *
 * NOTE(review): unlike updateStorage/injectStorage, this method does not
 * synchronize on storageMap — confirm that all callers already hold the
 * appropriate lock, or that registration happens before concurrent access.
 *
 * @param ds the reported storage; its ID must not already be registered
 * @return the newly created and registered storage info
 */
DatanodeStorageInfo createProvidedStorage(DatanodeStorage ds) {
  // Caller contract: this storage ID has not been registered yet.
  assert null == storageMap.get(ds.getStorageID());
  DatanodeStorageInfo storage = new ProvidedDatanodeStorageInfo(this, ds);
  // Mark as heartbeated up front — presumably so the new storage is not
  // immediately treated as stale after a failover; TODO confirm.
  storage.setHeartbeatedSinceFailover(true);
  storageMap.put(storage.getStorageID(), storage);
  return storage;
}
void injectStorage(DatanodeStorageInfo s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (null == storage) { LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(), getXferAddr()); DFSTopologyNodeImpl parent = null; if (getParent() instanceof DFSTopologyNodeImpl) { parent = (DFSTopologyNodeImpl) getParent(); } StorageType type = s.getStorageType(); if (!hasStorageType(type) && parent != null) { // we are about to add a type this node currently does not have, // inform the parent that a new type is added to this datanode parent.childAddStorage(getName(), type); } storageMap.put(s.getStorageID(), s); } else { assert storage == s : "found " + storage + " expected " + s; } } }
DatanodeStorageInfo currInfo = storages[i]; StorageType storageType = currInfo.getStorageType(); sids.add(currInfo.getStorageID()); types.add(storageType); if (StorageType.PROVIDED.equals(storageType)) { locs.add(new DatanodeInfoWithStorage( currInfo.getDatanodeDescriptor(), currInfo.getStorageID(), storageType)); excludedUUids.add(currInfo.getDatanodeDescriptor().getDatanodeUuid());
if (ratio < storageInfoDefragmentRatio) { datanodesAndStorages.add(node.getDatanodeUuid()); datanodesAndStorages.add(storage.getStorageID()); storage.getStorageID(), ratio, (ratio < storageInfoDefragmentRatio) ? " (queued for defragmentation)" : ""); storage.getStorageID(), storage.treeSetFillRatio(), aborted ? " (aborted)" : "");
final DatanodeStorageInfo s = locations.get(i); datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid(); storageIDs[i] = s.getStorageID(); storageTypes[i] = s.getStorageType();
if (storageInfo.numBlocks() == 0) { DatanodeStorageInfo info = storageMap.remove(storageInfo.getStorageID()); if (!hasStorageType(info.getStorageType())) {
sb.append(NodeBase.getPath(dnDesc)); } else { sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage.getStorageType()));
if (state == StoredReplicaState.LIVE) { if (storage.getStorageType() == StorageType.PROVIDED) { storage = new DatanodeStorageInfo(node, storage.getStorageID(), storage.getStorageType(), storage.getState());
/** * Find specified DatanodeStorageInfo. * @return DatanodeStorageInfo or null if not found. */ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { int len = getCapacity(); DatanodeStorageInfo providedStorageInfo = null; for(int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if(cur != null) { if (cur.getStorageType() == StorageType.PROVIDED) { // if block resides on provided storage, only match the storage ids if (dn.getStorageInfo(cur.getStorageID()) != null) { // do not return here as we have to check the other // DatanodeStorageInfos for this block which could be local providedStorageInfo = cur; } } else if (cur.getDatanodeDescriptor() == dn) { return cur; } } } return providedStorageInfo; }
DatanodeDescriptor node = storage.getDatanodeDescriptor(); if (corruptNodes.contains(node)) { String storageId = storage.getStorageID(); DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId); State state = (storageInfo == null) ? null : storageInfo.getState();
/**
 * Extracts the storage ID of each entry in the given storage array.
 *
 * @param storages storages to read IDs from; may be {@code null}
 * @return the storage IDs in the same order, or {@code null} if the
 *         input array was {@code null}
 */
public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
  // Tolerate a null array for consistency with the null-safe
  // toStorageIDs variant elsewhere in the codebase (was an NPE here).
  if (storages == null) {
    return null;
  }
  String[] storageIDs = new String[storages.length];
  for (int i = 0; i < storageIDs.length; i++) {
    storageIDs[i] = storages[i].getStorageID();
  }
  return storageIDs;
}
/**
 * Extracts the storage ID of each entry in the given storage array.
 *
 * @param storages storages to read IDs from; may be {@code null}
 * @return the storage IDs in the same order, or {@code null} if the
 *         input array was {@code null}
 */
public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
  // Tolerate a null array for consistency with the null-safe
  // toStorageIDs variant elsewhere in the codebase (was an NPE here).
  if (storages == null) {
    return null;
  }
  String[] storageIDs = new String[storages.length];
  for (int i = 0; i < storageIDs.length; i++) {
    storageIDs[i] = storages[i].getStorageID();
  }
  return storageIDs;
}
+ "storage report for {} from datanode {}", strBlockReportId, storageInfo.getStorageID(), nodeID.getDatanodeUuid()); processFirstBlockReport(storageInfo, newReport);
storageMap.put(storage.getStorageID(), storage); if (oldType != newType && !hasStorageType(oldType) && parent != null) {
DatanodeStorageInfo updateStorage(DatanodeStorage s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (storage == null) { LOG.info("Adding new storage ID " + s.getStorageID() + " for DN " + getXferAddr()); storage = new DatanodeStorageInfo(this, s); storageMap.put(s.getStorageID(), storage); } else if (storage.getState() != s.getState() || storage.getStorageType() != s.getStorageType()) { // For backwards compatibility, make sure that the type and // state are updated. Some reports from older datanodes do // not include these fields so we may have assumed defaults. storage.updateFromStorage(s); storageMap.put(storage.getStorageID(), storage); } return storage; } }
DatanodeStorageInfo updateStorage(DatanodeStorage s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (storage == null) { LOG.info("Adding new storage ID " + s.getStorageID() + " for DN " + getXferAddr()); storage = new DatanodeStorageInfo(this, s); storageMap.put(s.getStorageID(), storage); } else if (storage.getState() != s.getState() || storage.getStorageType() != s.getStorageType()) { // For backwards compatibility, make sure that the type and // state are updated. Some reports from older datanodes do // not include these fields so we may have assumed defaults. storage.updateFromStorage(s); storageMap.put(storage.getStorageID(), storage); } return storage; } }
/**
 * Marks every stored replica of the given block as corrupt, reporting the
 * owning datanode and storage ID of each replica to the block manager.
 *
 * @param bm  the block manager to report corruption to
 * @param blk the block whose replicas are to be marked corrupt
 * @throws IOException if the block manager fails to record the corruption
 */
private void markAllBlocksAsCorrupt(BlockManager bm, ExtendedBlock blk)
    throws IOException {
  for (DatanodeStorageInfo storage : bm.getStorages(blk.getLocalBlock())) {
    bm.findAndMarkBlockAsCorrupt(blk, storage.getDatanodeDescriptor(),
        storage.getStorageID(), "STORAGE_ID");
  }
}
/**
 * Builds one {@link StorageReport} per storage attached to the given
 * datanode, copying its capacity/usage figures. The failed flag is always
 * {@code false} and the non-DFS-used field is reported as {@code 0L}.
 *
 * @param dnd the datanode descriptor to report on
 * @return one report per storage; empty if the datanode has no storages
 */
public static StorageReport[] getStorageReportsForDatanode(
    DatanodeDescriptor dnd) {
  // Diamond operator instead of the redundant explicit type argument;
  // argument list formatting cleaned up ("dns ,false" -> "dns, false").
  ArrayList<StorageReport> reports = new ArrayList<>();
  for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
    DatanodeStorage dns = new DatanodeStorage(storage.getStorageID(),
        storage.getState(), storage.getStorageType());
    reports.add(new StorageReport(dns, false, storage.getCapacity(),
        storage.getDfsUsed(), storage.getRemaining(),
        storage.getBlockPoolUsed(), 0L));
  }
  return reports.toArray(StorageReport.EMPTY_ARRAY);
}
@Override public Boolean get() { final DatanodeDescriptor dnDescriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager(). getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid()); assertNotNull(dnDescriptor); DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos(); for (DatanodeStorageInfo info : infos) { if (info.getStorageID().equals(storageIdToRemove)) { LOG.info("Still found storage " + storageIdToRemove + " on " + info + "."); return false; } } assertEquals(NUM_STORAGES_PER_DN - 1, infos.length); return true; } }, 1000, 30000);