/**
 * Produces a new file info with the given range removed from the file's block map.
 * The incoming info is not mutated; a copy of the map is edited and attached to a
 * fresh info instance. NOTE(review): `range` is captured from the enclosing scope.
 */
@Override public GridGgfsFileInfo applyx(GridGgfsFileInfo info) throws GridException {
    // Copy-on-write: never mutate the map held by the cached info.
    GridGgfsFileMap map = new GridGgfsFileMap(info.fileMap());

    map.deleteRange(range);

    if (log.isDebugEnabled())
        log.debug("Deleted range from file map [fileId=" + info.id() + ", range=" + range +
            ", oldMap=" + info.fileMap() + ", newMap=" + map + ']');

    // Length is unchanged; only the block map differs.
    GridGgfsFileInfo updated = new GridGgfsFileInfo(info, info.length());

    updated.fileMap(map);

    return updated;
} };
/**
 * Creates a listing entry mirroring the given file info.
 *
 * @param fileInfo File info to construct listing entry from.
 */
public GridGgfsListingEntry(GridGgfsFileInfo fileInfo) {
    fileId = fileInfo.id();
    affKey = fileInfo.affinityKey();

    props = fileInfo.properties();

    accessTime = fileInfo.accessTime();
    modificationTime = fileInfo.modificationTime();

    // Block size and length are only meaningful for regular files;
    // for directories they retain their default values.
    if (fileInfo.isFile()) {
        blockSize = fileInfo.blockSize();
        len = fileInfo.length();
    }
}
/**
 * Builds a data cache key for the given block of a file. Prefers the file's explicit
 * affinity key; otherwise resolves one from the file map, if present; otherwise the
 * key carries no affinity.
 *
 * @param blockIdx Block index.
 * @param fileInfo File info.
 * @return Block key.
 */
public GridGgfsBlockKey blockKey(long blockIdx, GridGgfsFileInfo fileInfo) {
    GridUuid affKey = fileInfo.affinityKey();

    // No explicit affinity key - try resolving one from the file map, if any.
    if (affKey == null && fileInfo.fileMap() != null)
        affKey = fileInfo.fileMap().affinityKey(blockIdx * fileInfo.blockSize(), false);

    return new GridGgfsBlockKey(fileInfo.id(), affKey, fileInfo.evictExclude(), blockIdx);
}
/**
 * Constructs file info that replaces the directory listing while copying every
 * other attribute from the old info.
 *
 * @param listing New directory listing.
 * @param old Old file info.
 */
GridGgfsFileInfo(Map<String, GridGgfsListingEntry> listing, GridGgfsFileInfo old) {
    // NOTE(review): the hard-coded 'false' presumably resets a transient flag in the
    // primary constructor - confirm its meaning against that constructor's signature.
    this(old.isDirectory(), old.id, old.blockSize, old.len, old.affKey, listing, old.props,
        old.fileMap(), old.lockId, false, old.accessTime, old.modificationTime, old.evictExclude());
}
fileName); GridGgfsListingEntry entry = parentInfo.listing().get(fileName); fileName); assert parentInfo.isDirectory(); GridGgfsFileInfo updated = new GridGgfsFileInfo(fileInfo, accessTime == -1 ? fileInfo.accessTime() : accessTime, modificationTime == -1 ? fileInfo.modificationTime() : modificationTime);
/** * Optimize buffer size. * * @param bufSize Requested buffer size. * @param fileInfo File info. * @return Optimized buffer size. */ @SuppressWarnings("IfMayBeConditional") private static int optimizeBufferSize(int bufSize, GridGgfsFileInfo fileInfo) { assert bufSize > 0; if (fileInfo == null) return bufSize; int blockSize = fileInfo.blockSize(); if (blockSize <= 0) return bufSize; if (bufSize <= blockSize) // Optimize minimum buffer size to be equal file's block size. return blockSize; int maxBufSize = blockSize * MAX_BLOCKS_CNT; if (bufSize > maxBufSize) // There is no profit or optimization from larger buffers. return maxBufSize; if (fileInfo.length() == 0) // Make buffer size multiple of block size (optimized for new files). return bufSize / blockSize * blockSize; return bufSize; }
/**
 * Calculates size of directory or file for given ID.
 *
 * @param fileId File ID.
 * @param sum Summary object that will collect information.
 * @throws GridException If failed.
 */
private void summary0(GridUuid fileId, GridGgfsPathSummary sum) throws GridException {
    assert sum != null;

    GridGgfsFileInfo info = meta.info(fileId);

    // Concurrently removed entries are simply skipped.
    if (info == null)
        return;

    if (!info.isDirectory()) {
        sum.filesCount(sum.filesCount() + 1);
        sum.totalLength(sum.totalLength() + info.length());

        return;
    }

    // Root directory itself is not counted.
    if (!ROOT_ID.equals(info.id()))
        sum.directoriesCount(sum.directoriesCount() + 1);

    // Recurse into directory contents.
    for (GridGgfsListingEntry child : info.listing().values())
        summary0(child.fileId(), sum);
}
/**
 * Set lock on file info.
 *
 * @param info File info.
 * @return New file info with lock set.
 * @throws GridException In case lock is already set on that file.
 */
public GridGgfsFileInfo lockInfo(GridGgfsFileInfo info) throws GridException {
    if (!busyLock.enterBusy())
        throw new IllegalStateException("Failed to get lock info because Grid is stopping: " + info);

    try {
        assert info != null;

        // Refuse to lock a file already locked by a concurrent writer.
        if (info.lockId() != null)
            throw new GridException("Failed to lock file (file is being concurrently written) [fileId=" +
                info.id() + ", lockId=" + info.lockId() + ']');

        return new GridGgfsFileInfo(info, GridUuid.randomUuid(), info.modificationTime());
    }
    finally {
        busyLock.leaveBusy();
    }
}
/** * Gets initial affinity range. This range will have 0 length and will start from first * non-occupied file block. * * @param fileInfo File info to build initial range for. * @return Affinity range. */ private GridGgfsFileAffinityRange initialStreamRange(GridGgfsFileInfo fileInfo) { if (!ggfsCtx.configuration().isFragmentizerEnabled()) return null; if (!Boolean.parseBoolean(fileInfo.properties().get(GridGgfs.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); // Find first non-occupied block offset. long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize; // Need to get last affinity key and reuse it if we are on the same node. long lastBlockOff = off - fileInfo.blockSize(); if (lastBlockOff < 0) lastBlockOff = 0; GridGgfsFileMap map = fileInfo.fileMap(); GridUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false); GridUuid affKey = data.nextAffinityKey(prevAffKey); return affKey == null ? null : new GridGgfsFileAffinityRange(off, off, affKey); }
/**
 * Opens an append stream on the secondary file system and synchronizes the trailing
 * partial block into the data cache before locking the file. NOTE(review): `path`,
 * `bufSize`, `fs`, `out`, `ggfsCtx` and `metaCache` are captured from the enclosing
 * scope; statement order (append -> sync tail -> lock -> cache put) is significant.
 */
@Override public GridGgfsSecondaryOutputStreamDescriptor onSuccess(Map<GridGgfsPath, GridGgfsFileInfo> infos)
    throws Exception {
    GridGgfsFileInfo info = infos.get(path);

    if (info.isDirectory())
        throw new GridGgfsException("Failed to open output stream to the file in the " +
            "secondary file system because the path points to a directory: " + path);

    out = fs.append(path, bufSize, false, null);

    // Synchronize file ending.
    long len = info.length();
    int blockSize = info.blockSize();

    int remainder = (int)(len % blockSize);

    // A non-zero remainder means the last block is partial and must be fetched
    // from the secondary file system into the data cache.
    if (remainder > 0) {
        int blockIdx = (int)(len / blockSize);

        GridGgfsReader reader = fs.open(path, bufSize);

        try {
            ggfsCtx.data().dataBlock(info, path, blockIdx, reader).get();
        } finally {
            reader.close();
        }
    }

    // Set lock and return.
    info = lockInfo(info);

    metaCache.putx(info.id(), info);

    return new GridGgfsSecondaryOutputStreamDescriptor(infos.get(path.parent()).id(), info, out);
}
for (long i = 0; i < fileInfo.blocksCount(); i++) { GridGgfsBlockKey key = new GridGgfsBlockKey(fileInfo.id(), fileInfo.affinityKey(), fileInfo.evictExclude(), i);
return null; assert info.isDirectory(); Map<String, GridGgfsListingEntry> listing = info.listing(); if (!exclude.contains(fileInfo.id()) && fileInfo.fileMap() != null && !fileInfo.fileMap().ranges().isEmpty()) return fileInfo;
GridGgfsListingEntry entry = parentInfo.listing().get(fileName); assert parentInfo.isDirectory(); if (!rmvLocked && fileInfo.lockId() != null) throw new GridGgfsException("Failed to remove file (file is opened for writing) [fileName=" + fileName + ", fileId=" + fileId + ", lockId=" + fileInfo.lockId() + ']'); if (fileInfo.isDirectory()) { Map<String, GridGgfsListingEntry> listing = fileInfo.listing(); GridGgfsListingEntry listingEntry = parentInfo.listing().get(fileName); return GridGgfsFileInfo.builder(fileInfo).path(path).build();
/**
 * Tries to remove blocks affected by fragmentizer. If {@code cleanNonColocated} is {@code true}, will remove
 * non-colocated blocks as well.
 *
 * @param fileInfo File info to clean up.
 * @param range Range to clean up.
 * @param cleanNonColocated {@code True} if all blocks should be cleaned.
 */
public void cleanBlocks(GridGgfsFileInfo fileInfo, GridGgfsFileAffinityRange range, boolean cleanNonColocated) {
    int blockSize = fileInfo.blockSize();

    long startIdx = range.startOffset() / blockSize;
    long endIdx = range.endOffset() / blockSize;

    if (log.isDebugEnabled())
        log.debug("Cleaning blocks [fileInfo=" + fileInfo + ", range=" + range +
            ", cleanNonColocated=" + cleanNonColocated + ", startIdx=" + startIdx + ", endIdx=" + endIdx + ']');

    GridUuid fileId = fileInfo.id();
    boolean evictExclude = fileInfo.evictExclude();

    try {
        try (GridDataLoader<GridGgfsBlockKey, byte[]> ldr = dataLoader()) {
            for (long blockIdx = startIdx; blockIdx <= endIdx; blockIdx++) {
                // Remove the colocated copy of the block.
                ldr.removeData(new GridGgfsBlockKey(fileId, range.affinityKey(), evictExclude, blockIdx));

                // Optionally remove the non-colocated (no affinity key) copy as well.
                if (cleanNonColocated)
                    ldr.removeData(new GridGgfsBlockKey(fileId, null, evictExclude, blockIdx));
            }
        }
    }
    catch (GridException e) {
        log.error("Failed to clean up file range [fileInfo=" + fileInfo + ", range=" + range + ']', e);
    }
}
/** {@inheritDoc} */ @Override public GridGgfsFileInfo apply(GridGgfsFileInfo oldInfo) { GridGgfsFileMap oldMap = oldInfo.fileMap(); GridGgfsFileMap newMap = new GridGgfsFileMap(oldMap); newMap.addRange(range); // Update file length. GridGgfsFileInfo updated = new GridGgfsFileInfo(oldInfo, oldInfo.length() + space); updated.fileMap(newMap); return updated; }
/** {@inheritDoc} */ @Override @Nullable public GridGgfsFileInfo apply(GridGgfsFileInfo fileInfo) { assert fileInfo != null : "File info not found for the child: " + entry.fileId(); assert fileInfo.isDirectory(); Map<String, GridGgfsListingEntry> listing = new HashMap<>(fileInfo.listing().size() + (rmv ? 0 : 1)); listing.putAll(fileInfo.listing()); if (rmv) { GridGgfsListingEntry oldEntry = listing.get(fileName); if (oldEntry == null || !oldEntry.fileId().equals(entry.fileId())) throw new GridRuntimeException("Directory listing doesn't contain expected file" + " [listing=" + listing + ", fileName=" + fileName + ", entry=" + entry + ']'); // Modify listing in-place. listing.remove(fileName); } else { // Modify listing in-place. GridGgfsListingEntry oldEntry = listing.put(fileName, entry); if (oldEntry != null && !oldEntry.fileId().equals(entry.fileId())) throw new GridRuntimeException("Directory listing contains unexpected file" + " [listing=" + listing + ", fileName=" + fileName + ", entry=" + entry + ", oldEntry=" + oldEntry + ']'); } return new GridGgfsFileInfo(listing, fileInfo); }
throw new GridGgfsFileNotFoundException("Failed to lock parent directory (not found): " + parentId); if (!parentInfo.isDirectory()) throw new GridGgfsInvalidPathException("Parent file is not a directory: " + parentInfo); Map<String, GridGgfsListingEntry> parentListing = parentInfo.listing(); return entry.fileId(); GridUuid fileId = newFileInfo.id();
if (info.isDirectory()) { deleteDirectory(TRASH_ID, id); assert info.isFile(); if (info.path() != null) evts.record(new GridGgfsEvent(info.path(), ggfsCtx.kernalContext().discovery().localNode(), EVT_GGFS_FILE_PURGED)); else
for (int i = 1; i <= prefetchBlocks; i++) { if (fileInfo.blockSize() * (i + blockIdx) >= fileInfo.length()) break; else if (locCache.get(blockIdx + i) == null) "[path=" + path + ", blockIdx=" + blockIdx + ']'); int blockSize = fileInfo.blockSize(); if (blockIdx == fileInfo.blocksCount() - 1) blockSize = (int)(fileInfo.length() % blockSize); throw new IOException("Inconsistent file's data block (incorrectly written?)" + " [path=" + path + ", blockIdx=" + blockIdx + ", blockSize=" + bytes.length + ", expectedBlockSize=" + blockSize + ", fileBlockSize=" + fileInfo.blockSize() + ", fileLen=" + fileInfo.length() + ']');