/**
 * Gets the next file to be processed by the fragmentizer.
 *
 * @param exclude File IDs to exclude (the ones that are currently being processed).
 * @return File info of the file to process, or {@code null} if there are no such files.
 * @throws GridException In case of error.
 */
@Nullable private GridGgfsFileInfo fileForFragmentizer(Collection<GridUuid> exclude) throws GridException {
    return fragmentizerEnabled ? ggfsCtx.meta().fileForFragmentizer(exclude) : null;
}
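// Illustrative sketch (not GGFS source): how a caller might repeatedly poll a
// fileForFragmentizer-style method, excluding files that are already being processed,
// until no eligible file remains. FragmentizerMetaSource and all names below are
// hypothetical stand-ins for the private method shown above.
import java.util.Collection;
import java.util.HashSet;
import java.util.UUID;

interface FragmentizerMetaSource {
    /** Returns the next file to fragment, or {@code null} if none remain. */
    UUID fileForFragmentizer(Collection<UUID> exclude) throws Exception;
}

class FragmentizerPollSketch {
    static void pollAll(FragmentizerMetaSource meta) throws Exception {
        Collection<UUID> inProgress = new HashSet<>();

        UUID next;

        // Keep asking for work until the metadata source reports no eligible files.
        while ((next = meta.fileForFragmentizer(inProgress)) != null) {
            inProgress.add(next); // Exclude it from subsequent queries while it is processed.

            System.out.println("Scheduling fragmentation of file " + next);
        }
    }
}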
/**
 * Constructs file input stream.
 *
 * @param ggfsCtx GGFS context.
 * @param path Path to the stored file.
 * @param fileInfo File info of the file to read binary data from.
 * @param prefetchBlocks Number of blocks to prefetch.
 * @param seqReadsBeforePrefetch Number of sequential reads before prefetch is triggered.
 * @param secReader Optional secondary file system reader.
 * @param metrics Local GGFS metrics.
 */
GridGgfsInputStreamImpl(GridGgfsContext ggfsCtx, GridGgfsPath path, GridGgfsFileInfo fileInfo, int prefetchBlocks,
    int seqReadsBeforePrefetch, @Nullable GridGgfsReader secReader, GridGgfsLocalMetrics metrics) {
    assert ggfsCtx != null;
    assert path != null;
    assert fileInfo != null;
    assert metrics != null;

    this.path = path;
    this.fileInfo = fileInfo;
    this.prefetchBlocks = prefetchBlocks;
    this.seqReadsBeforePrefetch = seqReadsBeforePrefetch;
    this.secReader = secReader;
    this.metrics = metrics;

    meta = ggfsCtx.meta();
    data = ggfsCtx.data();

    log = ggfsCtx.kernalContext().log(GridGgfsInputStream.class);

    maxLocCacheSize = (prefetchBlocks > 0 ? prefetchBlocks : 1) * 3 / 2;

    locCache = new LinkedHashMap<>(maxLocCacheSize, 1.0f);

    pendingFuts = new GridConcurrentHashSet<>(prefetchBlocks > 0 ? prefetchBlocks : 1);
}
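// Illustrative sketch (not GGFS source): the local block cache above is sized to 1.5x
// the prefetch depth, with a floor of one block when prefetching is disabled. The helper
// below only reproduces that arithmetic for clarity; the class name is hypothetical.
class PrefetchCacheSizing {
    /** Mirrors "(prefetchBlocks > 0 ? prefetchBlocks : 1) * 3 / 2" from the constructor above. */
    static int maxLocalCacheSize(int prefetchBlocks) {
        return (prefetchBlocks > 0 ? prefetchBlocks : 1) * 3 / 2;
    }

    public static void main(String[] args) {
        System.out.println(maxLocalCacheSize(8)); // 12 cached blocks for a prefetch depth of 8.
        System.out.println(maxLocalCacheSize(0)); // 1 cached block when prefetching is disabled.
    }
}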
log = ggfsCtx.kernalContext().log(GridGgfsImpl.class);
evts = ggfsCtx.kernalContext().event();

meta = ggfsCtx.meta();
data = ggfsCtx.data();

secondaryFs = cfg.getSecondaryFileSystem();
meta = ggfsCtx.meta();
data = ggfsCtx.data();
/**
 * Constructor.
 *
 * @param ggfsCtx GGFS context.
 */
GridGgfsDeleteWorker(GridGgfsContext ggfsCtx) {
    super("ggfs-delete-worker%" + ggfsCtx.ggfs().name() + "%" + ggfsCtx.kernalContext().localNodeId() + "%");

    assert ggfsCtx != null;

    this.ggfsCtx = ggfsCtx;

    meta = ggfsCtx.meta();
    data = ggfsCtx.data();
    evts = ggfsCtx.kernalContext().event();

    String ggfsName = ggfsCtx.ggfs().name();

    topic = F.isEmpty(ggfsName) ? TOPIC_GGFS : TOPIC_GGFS.topic(ggfsName);

    assert meta != null;
    assert data != null;

    log = ggfsCtx.kernalContext().log(GridGgfsDeleteWorker.class);
}
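// Illustrative sketch (not GGFS source): the delete worker's thread name embeds the GGFS
// instance name and the local node ID, and the event topic falls back to the shared
// TOPIC_GGFS when the instance name is empty. The class and method names below are
// hypothetical; only the string pattern is taken from the constructor above.
import java.util.UUID;

class DeleteWorkerNamingSketch {
    static String workerThreadName(String ggfsName, UUID localNodeId) {
        // Same pattern as the super(...) call above: "ggfs-delete-worker%<name>%<nodeId>%".
        return "ggfs-delete-worker%" + ggfsName + "%" + localNodeId + "%";
    }

    static String topicName(String ggfsName) {
        // Stand-in for "F.isEmpty(ggfsName) ? TOPIC_GGFS : TOPIC_GGFS.topic(ggfsName)".
        return (ggfsName == null || ggfsName.isEmpty()) ? "TOPIC_GGFS" : "TOPIC_GGFS-" + ggfsName;
    }

    public static void main(String[] args) {
        System.out.println(workerThreadName("ggfs1", UUID.randomUUID()));
        System.out.println(topicName(""));
    }
}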
GridUuid fileId = req.fileId();

GridGgfsFileInfo fileInfo = ggfsCtx.meta().info(fileId);

// Excerpt: handling of a file fragmentation range. Depending on the current range status,
// the file metadata is updated to advance the range to its next status or to drop the
// range entirely (surrounding switch branches are omitted in this excerpt).
case RANGE_STATUS_INITIAL: {
    updated = ggfsCtx.meta().updateInfo(fileId, updateRange(range, RANGE_STATUS_MOVING));

updated = ggfsCtx.meta().updateInfo(fileId, updateRange(range, RANGE_STATUS_MOVED));

updated = ggfsCtx.meta().updateInfo(fileId, deleteRange(range));
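// Illustrative sketch (not GGFS source): the updateInfo(...) calls above walk a
// fragmentation range through its statuses. The enum below is a hypothetical model of
// that progression; the real code uses RANGE_STATUS_* constants rather than an enum.
enum RangeStatusSketch {
    INITIAL, MOVING, MOVED, DELETED;

    /** Next status in the fragmentation lifecycle, mirroring the case branches above. */
    RangeStatusSketch next() {
        switch (this) {
            case INITIAL: return MOVING;  // Mark the range as being relocated.
            case MOVING:  return MOVED;   // Relocation finished for this range.
            case MOVED:   return DELETED; // Range record is removed from the file info.
            default:      return this;
        }
    }
}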