/** {@inheritDoc} */
@Override public void onKernalStop(boolean cancel) {
    for (GridGgfsContext ggfsCtx : ggfsCache.values()) {
        if (log.isDebugEnabled())
            log.debug("Stopping ggfs: " + ggfsCtx.configuration().getName());

        List<GridGgfsManager> mgrs = ggfsCtx.managers();

        // Notify managers in reverse order relative to their start order.
        for (int i = mgrs.size() - 1; i >= 0; i--)
            mgrs.get(i).onKernalStop(cancel);
    }

    if (log.isDebugEnabled())
        log.debug("Finished executing GGFS processor onKernalStop() callback.");
}
/** {@inheritDoc} */
@Override public void onKernalStop(boolean cancel) {
    for (GridGgfsContext ggfsCtx : ggfsCache.values()) {
        if (log.isDebugEnabled())
            log.debug("Stopping ggfs: " + ggfsCtx.configuration().getName());

        List<GridGgfsManager> mgrs = ggfsCtx.managers();

        // Notify managers in reverse order relative to their start order.
        for (int i = mgrs.size() - 1; i >= 0; i--)
            mgrs.get(i).onKernalStop(cancel);
    }

    if (log.isDebugEnabled())
        log.debug("Finished executing GGFS processor onKernalStop() callback.");
}
/** {@inheritDoc} */ @Override public void stop(boolean cancel) { // Stop GGFS instances. for (GridGgfsContext ggfsCtx : ggfsCache.values()) { if (log.isDebugEnabled()) log.debug("Stopping ggfs: " + ggfsCtx.configuration().getName()); List<GridGgfsManager> mgrs = ggfsCtx.managers(); for (ListIterator<GridGgfsManager> it = mgrs.listIterator(mgrs.size()); it.hasPrevious();) { GridGgfsManager mgr = it.previous(); mgr.stop(cancel); } ggfsCtx.ggfs().stop(); } ggfsCache.clear(); if (log.isDebugEnabled()) log.debug("GGFS processor stopped."); }
/** {@inheritDoc} */ @Override public void stop(boolean cancel) { // Stop GGFS instances. for (GridGgfsContext ggfsCtx : ggfsCache.values()) { if (log.isDebugEnabled()) log.debug("Stopping ggfs: " + ggfsCtx.configuration().getName()); List<GridGgfsManager> mgrs = ggfsCtx.managers(); for (ListIterator<GridGgfsManager> it = mgrs.listIterator(mgrs.size()); it.hasPrevious();) { GridGgfsManager mgr = it.previous(); mgr.stop(cancel); } ggfsCtx.ggfs().stop(); } ggfsCache.clear(); if (log.isDebugEnabled()) log.debug("GGFS processor stopped."); }
/** {@inheritDoc} */ @Override protected void start0() throws GridException { GridGgfsConfiguration ggfsCfg = ggfsCtx.configuration(); Map<String,String> cfg = ggfsCfg.getIpcEndpointConfiguration(); if (F.isEmpty(cfg)) { // Set default configuration. cfg = new HashMap<>(); cfg.put("type", U.isWindows() ? "tcp" : "shmem"); cfg.put("port", String.valueOf(DFLT_IPC_PORT)); } if (ggfsCfg.isIpcEndpointEnabled()) bind(cfg, /*management*/false); if (ggfsCfg.getManagementPort() >= 0) { cfg = new HashMap<>(); cfg.put("type", "tcp"); cfg.put("port", String.valueOf(ggfsCfg.getManagementPort())); bind(cfg, /*management*/true); } if (bindWorker != null) new GridThread(bindWorker).start(); }
/** {@inheritDoc} */ @Override protected void start0() throws GridException { GridGgfsConfiguration ggfsCfg = ggfsCtx.configuration(); Map<String,String> cfg = ggfsCfg.getIpcEndpointConfiguration(); if (F.isEmpty(cfg)) { // Set default configuration. cfg = new HashMap<>(); cfg.put("type", U.isWindows() ? "tcp" : "shmem"); cfg.put("port", String.valueOf(DFLT_IPC_PORT)); } if (ggfsCfg.isIpcEndpointEnabled()) bind(cfg, /*management*/false); if (ggfsCfg.getManagementPort() >= 0) { cfg = new HashMap<>(); cfg.put("type", "tcp"); cfg.put("port", String.valueOf(ggfsCfg.getManagementPort())); bind(cfg, /*management*/true); } if (bindWorker != null) new GridThread(bindWorker).start(); }
/** * Constructs GGFS IPC handler. */ GridGgfsIpcHandler(GridGgfsContext ggfsCtx, boolean mgmt) { assert ggfsCtx != null; this.mgmt = mgmt; ctx = ggfsCtx.kernalContext(); ggfs = ggfsCtx.ggfs(); // Keep buffer size multiple of block size so no extra byte array copies is performed. bufSize = ggfsCtx.configuration().getBlockSize() * 2; log = ctx.log(GridGgfsIpcHandler.class); }
/** * Constructs GGFS IPC handler. */ GridGgfsIpcHandler(GridGgfsContext ggfsCtx, boolean mgmt) { assert ggfsCtx != null; this.mgmt = mgmt; ctx = ggfsCtx.kernalContext(); ggfs = ggfsCtx.ggfs(); // Keep buffer size multiple of block size so no extra byte array copies is performed. bufSize = ggfsCtx.configuration().getBlockSize() * 2; log = ctx.log(GridGgfsIpcHandler.class); }
/** {@inheritDoc} */ @Override protected void onKernalStart0() throws GridException { if (ggfsCtx.configuration().isFragmentizerEnabled()) { // Check at startup if this node is a fragmentizer coordinator. GridDiscoveryEvent locJoinEvt = ggfsCtx.kernalContext().discovery().localJoinEvent(); checkLaunchCoordinator(locJoinEvt); } }
/** {@inheritDoc} */ @Override protected void onKernalStart0() throws GridException { if (ggfsCtx.configuration().isFragmentizerEnabled()) { // Check at startup if this node is a fragmentizer coordinator. GridDiscoveryEvent locJoinEvt = ggfsCtx.kernalContext().discovery().localJoinEvent(); checkLaunchCoordinator(locJoinEvt); } }
while (fragmentingFiles.size() < ggfsCtx.configuration().getFragmentizerConcurrentFiles()) { GridGgfsFileInfo fileInfo = fileForFragmentizer(fragmentingFiles.keySet());
/** * Gets initial affinity range. This range will have 0 length and will start from first * non-occupied file block. * * @param fileInfo File info to build initial range for. * @return Affinity range. */ private GridGgfsFileAffinityRange initialStreamRange(GridGgfsFileInfo fileInfo) { if (!ggfsCtx.configuration().isFragmentizerEnabled()) return null; if (!Boolean.parseBoolean(fileInfo.properties().get(GridGgfs.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); // Find first non-occupied block offset. long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize; // Need to get last affinity key and reuse it if we are on the same node. long lastBlockOff = off - fileInfo.blockSize(); if (lastBlockOff < 0) lastBlockOff = 0; GridGgfsFileMap map = fileInfo.fileMap(); GridUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false); GridUuid affKey = data.nextAffinityKey(prevAffKey); return affKey == null ? null : new GridGgfsFileAffinityRange(off, off, affKey); }
/** * Gets initial affinity range. This range will have 0 length and will start from first * non-occupied file block. * * @param fileInfo File info to build initial range for. * @return Affinity range. */ private GridGgfsFileAffinityRange initialStreamRange(GridGgfsFileInfo fileInfo) { if (!ggfsCtx.configuration().isFragmentizerEnabled()) return null; if (!Boolean.parseBoolean(fileInfo.properties().get(GridGgfs.PROP_PREFER_LOCAL_WRITES))) return null; int blockSize = fileInfo.blockSize(); // Find first non-occupied block offset. long off = ((fileInfo.length() + blockSize - 1) / blockSize) * blockSize; // Need to get last affinity key and reuse it if we are on the same node. long lastBlockOff = off - fileInfo.blockSize(); if (lastBlockOff < 0) lastBlockOff = 0; GridGgfsFileMap map = fileInfo.fileMap(); GridUuid prevAffKey = map == null ? null : map.affinityKey(lastBlockOff, false); GridUuid affKey = data.nextAffinityKey(prevAffKey); return affKey == null ? null : new GridGgfsFileAffinityRange(off, off, affKey); }
/** {@inheritDoc} */ @Override protected void start0() throws GridException { if (!ggfsCtx.configuration().isFragmentizerEnabled()) return; // We care only about node leave and fail events. ggfsCtx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() { @Override public void onEvent(GridEvent evt) { assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED; GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt; checkLaunchCoordinator(discoEvt); } }, EVT_NODE_LEFT, EVT_NODE_FAILED); fragmentizerWorker = new FragmentizerWorker(); String ggfsName = ggfsCtx.configuration().getName(); topic = F.isEmpty(ggfsName) ? TOPIC_GGFS : TOPIC_GGFS.topic(ggfsName); ggfsCtx.kernalContext().io().addMessageListener(topic, fragmentizerWorker); new GridThread(fragmentizerWorker).start(); }
/** {@inheritDoc} */ @Override protected void start0() throws GridException { if (!ggfsCtx.configuration().isFragmentizerEnabled()) return; // We care only about node leave and fail events. ggfsCtx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() { @Override public void onEvent(GridEvent evt) { assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED; GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt; checkLaunchCoordinator(discoEvt); } }, EVT_NODE_LEFT, EVT_NODE_FAILED); fragmentizerWorker = new FragmentizerWorker(); String ggfsName = ggfsCtx.configuration().getName(); topic = F.isEmpty(ggfsName) ? TOPIC_GGFS : TOPIC_GGFS.topic(ggfsName); ggfsCtx.kernalContext().io().addMessageListener(topic, fragmentizerWorker); new GridThread(fragmentizerWorker).start(); }
/**
 * Creates new instance of explicit data loader.
 *
 * @return New instance of data loader.
 */
private GridDataLoader<GridGgfsBlockKey, byte[]> dataLoader() {
    GridGgfsConfiguration cfg = ggfsCtx.configuration();

    GridDataLoader<GridGgfsBlockKey, byte[]> ldr =
        ggfsCtx.kernalContext().<GridGgfsBlockKey, byte[]>dataLoad().dataLoader(dataCachePrj.name());

    // Apply optional per-node tuning; non-positive values keep loader defaults.
    int batchSize = cfg.getPerNodeBatchSize();

    if (batchSize > 0)
        ldr.perNodeBufferSize(batchSize);

    int parallelBatches = cfg.getPerNodeParallelBatchCount();

    if (parallelBatches > 0)
        ldr.perNodeParallelLoadOperations(parallelBatches);

    ldr.updater(GridDataLoadCacheUpdaters.<GridGgfsBlockKey, byte[]>batchedSorted());

    return ldr;
}
/**
 * Creates new instance of explicit data loader.
 *
 * @return New instance of data loader.
 */
private GridDataLoader<GridGgfsBlockKey, byte[]> dataLoader() {
    GridGgfsConfiguration cfg = ggfsCtx.configuration();

    GridDataLoader<GridGgfsBlockKey, byte[]> ldr =
        ggfsCtx.kernalContext().<GridGgfsBlockKey, byte[]>dataLoad().dataLoader(dataCachePrj.name());

    // Apply optional per-node tuning; non-positive values keep loader defaults.
    int batchSize = cfg.getPerNodeBatchSize();

    if (batchSize > 0)
        ldr.perNodeBufferSize(batchSize);

    int parallelBatches = cfg.getPerNodeParallelBatchCount();

    if (parallelBatches > 0)
        ldr.perNodeParallelLoadOperations(parallelBatches);

    ldr.updater(GridDataLoadCacheUpdaters.<GridGgfsBlockKey, byte[]>batchedSorted());

    return ldr;
}
/** {@inheritDoc} */
@Override protected void start0() throws GridException {
    cfg = ggfsCtx.configuration();

    metaCache = ggfsCtx.kernalContext().cache().cache(cfg.getMetaCacheName());

    // Assert BEFORE first dereference (original asserted after metaCache was already used).
    assert metaCache != null;

    // GGFS metadata updates require transactional semantics.
    if (metaCache.configuration().getAtomicityMode() != TRANSACTIONAL)
        throw new GridException("Meta cache should be transactional: " + cfg.getMetaCacheName());

    evts = ggfsCtx.kernalContext().event();

    sampling = new GridGgfsSamplingKey(cfg.getName());

    id2InfoPrj = (GridCacheProjectionEx<GridUuid, GridGgfsFileInfo>)metaCache.<GridUuid, GridGgfsFileInfo>cache();

    log = ggfsCtx.kernalContext().log(GridGgfsMetaManager.class);
}
/** {@inheritDoc} */
@Override protected void start0() throws GridException {
    cfg = ggfsCtx.configuration();

    metaCache = ggfsCtx.kernalContext().cache().cache(cfg.getMetaCacheName());

    // Assert BEFORE first dereference (original asserted after metaCache was already used).
    assert metaCache != null;

    // Future that completes when meta cache preloading finishes.
    metaCacheStartFut = ggfsCtx.kernalContext().cache().internalCache(cfg.getMetaCacheName()).preloader()
        .startFuture();

    // GGFS metadata updates require transactional semantics.
    if (metaCache.configuration().getAtomicityMode() != TRANSACTIONAL)
        throw new GridException("Meta cache should be transactional: " + cfg.getMetaCacheName());

    evts = ggfsCtx.kernalContext().event();

    sampling = new GridGgfsSamplingKey(cfg.getName());

    id2InfoPrj = (GridCacheProjectionEx<GridUuid, GridGgfsFileInfo>)metaCache.<GridUuid, GridGgfsFileInfo>cache();

    log = ggfsCtx.kernalContext().log(GridGgfsMetaManager.class);
}
ggfsCtx.configuration().getFragmentizerLocalWritesRatio()) {