/**
 * Constructor. Names the worker thread "fragmentizer-worker" for this Ignite
 * instance and binds its logger to {@link IgfsFragmentizerManager}.
 */
protected FragmentizerWorker() {
    super(igfsCtx.kernalContext().igniteInstanceName(), "fragmentizer-worker",
        igfsCtx.kernalContext().log(IgfsFragmentizerManager.class));
}
/**
 * Constructor. Names the worker thread "bind-worker" for this Ignite instance
 * and binds its logger to {@link IgfsServerManager}.
 */
private BindWorker() {
    super(igfsCtx.kernalContext().igniteInstanceName(), "bind-worker",
        igfsCtx.kernalContext().log(IgfsServerManager.class));
}
/**
 * Creates accept worker. Names the thread "igfs-accept-worker" for this Ignite
 * instance and reuses the enclosing {@code IgfsServer}'s logger.
 */
protected AcceptWorker() {
    super(igfsCtx.kernalContext().igniteInstanceName(), "igfs-accept-worker", IgfsServer.this.log);
}
/**
 * Constructor. Subscribes the coordinator to node-left/node-failed discovery
 * events and to messages arriving on {@code topic}.
 */
protected FragmentizerCoordinator() {
    super(igfsCtx.kernalContext().igniteInstanceName(), "fragmentizer-coordinator",
        igfsCtx.kernalContext().log(IgfsFragmentizerManager.class));

    // React to topology changes. NOTE(review): presumably so fragmentation work
    // owned by departed nodes can be reassigned — confirm against the coordinator body.
    igfsCtx.kernalContext().event().addLocalEventListener(this, EVT_NODE_LEFT, EVT_NODE_FAILED);

    // Receive fragmentizer communication messages addressed to this manager's topic.
    igfsCtx.kernalContext().io().addMessageListener(topic, this);
}
/**
 * Sends a communication message to the given node, marshalling it first when
 * the destination is remote.
 *
 * @param nodeId Node ID.
 * @param topic Topic.
 * @param msg Message.
 * @param plc Policy.
 * @throws IgniteCheckedException In case of error.
 */
public void send(UUID nodeId, Object topic, IgfsCommunicationMessage msg, byte plc) throws IgniteCheckedException {
    boolean rmt = !kernalContext().localNodeId().equals(nodeId);

    // Local delivery needs no marshalling.
    if (rmt)
        msg.prepareMarshal(kernalContext().config().getMarshaller());

    if (!(topic instanceof GridTopic)) {
        kernalContext().io().sendToCustomTopic(nodeId, topic, msg, plc);

        return;
    }

    kernalContext().io().sendToGridTopic(nodeId, (GridTopic)topic, msg, plc);
}
/**
 * Constructs IGFS server.
 *
 * @param igfsCtx IGFS context.
 * @param endpointCfg Endpoint configuration to start.
 * @param mgmt Management flag - if true, server is intended to be started for Visor.
 */
public IgfsServer(IgfsContext igfsCtx, IgfsIpcEndpointConfiguration endpointCfg, boolean mgmt) {
    assert igfsCtx != null;
    assert endpointCfg != null;

    this.igfsCtx = igfsCtx;
    this.endpointCfg = endpointCfg;
    this.mgmt = mgmt;

    marsh = new IgfsMarshaller();

    log = igfsCtx.kernalContext().log(IgfsServer.class);
}
/**
 * Creates client worker bound to a connected client endpoint.
 *
 * @param endpoint Connected client endpoint.
 * @param idx Worker index for worker thread naming.
 * @throws IgniteCheckedException If endpoint output stream cannot be obtained.
 */
protected ClientWorker(IpcEndpoint endpoint, int idx) throws IgniteCheckedException {
    super(igfsCtx.kernalContext().igniteInstanceName(), "igfs-client-worker-" + idx, IgfsServer.this.log);

    this.endpoint = endpoint;

    // Buffer the raw endpoint stream; obtaining it may fail with IgniteCheckedException.
    out = new IgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream()));

    ses = new IgfsClientSession();
}
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    metaCacheStartLatch = new CountDownLatch(1);

    log = igfsCtx.kernalContext().log(IgfsMetaManager.class);

    evts = igfsCtx.kernalContext().event();

    cfg = igfsCtx.configuration();

    // Both the sampling key and the meta cache name derive from the IGFS configuration.
    sampling = new IgfsSamplingKey(cfg.getName());
    metaCacheName = cfg.getMetaCacheConfiguration().getName();
}
/**
 * Executes IGFS task with overridden maximum range length (see
 * {@link org.apache.ignite.configuration.FileSystemConfiguration#getMaximumTaskRangeLength()} for more information).
 *
 * @param task Task to execute.
 * @param rslvr Optional resolver to control split boundaries.
 * @param paths Collection of paths to be processed within this task.
 * @param skipNonExistentFiles Whether to skip non existent files. If set to {@code true} non-existent files will
 *      be ignored. Otherwise an exception will be thrown.
 * @param maxRangeLen Optional maximum range length. If {@code 0}, then by default all consecutive
 *      IGFS blocks will be included.
 * @param arg Optional task argument.
 * @return Execution future.
 */
<T, R> IgniteInternalFuture<R> executeAsync0(IgfsTask<T, R> task, @Nullable IgfsRecordResolver rslvr,
    Collection<IgfsPath> paths, boolean skipNonExistentFiles, long maxRangeLen, @Nullable T arg) {
    IgfsTaskArgsImpl<T> taskArgs =
        new IgfsTaskArgsImpl<>(cfg.getName(), paths, rslvr, skipNonExistentFiles, maxRangeLen, arg);

    return igfsCtx.kernalContext().task().execute(task, taskArgs);
}
/**
 * Constructor. Thread name embeds the IGFS name and local node ID for diagnostics.
 *
 * @param igfsCtx IGFS context.
 */
IgfsDeleteWorker(IgfsContext igfsCtx) {
    super("igfs-delete-worker%" + igfsCtx.igfs().name() + "%" + igfsCtx.kernalContext().localNodeId() + "%");

    this.igfsCtx = igfsCtx;

    meta = igfsCtx.meta();
    assert meta != null;

    data = igfsCtx.data();
    assert data != null;

    log = igfsCtx.kernalContext().log(IgfsDeleteWorker.class);
}
/** {@inheritDoc} */ @Override public void close(boolean force) { notifyListenersOnClose(); String gridName = igfs.context().kernalContext().grid().name(); synchronized (REF_CTR_MUX) { Integer cnt = REF_CTRS.get(gridName); if (cnt != null) { // The node was created by this HadoopIgfsWrapper. // The node must be stopped when there are not opened filesystems that are used one. if (cnt > 1) REF_CTRS.put(gridName, cnt - 1); else { REF_CTRS.remove(gridName); G.stop(gridName, false); } } } }
/**
 * Called when IGFS processor is started.
 *
 * @param igfsCtx IGFS context.
 * @throws IgniteCheckedException If manager start-up failed.
 */
public void start(IgfsContext igfsCtx) throws IgniteCheckedException {
    // CAS must run unconditionally (even with asserts disabled) to flip the flag.
    boolean first = starting.compareAndSet(false, true);

    assert first : "Method start is called more than once for manager: " + this;

    assert igfsCtx != null;

    this.igfsCtx = igfsCtx;

    log = igfsCtx.kernalContext().log(getClass());

    start0();

    if (log != null && log.isDebugEnabled())
        log.debug(startInfo());
}
/** {@inheritDoc} */ @Override protected void onKernalStart0() throws IgniteCheckedException { if (igfsCtx.configuration().isFragmentizerEnabled()) { // Check at startup if this node is a fragmentizer coordinator. DiscoveryEvent locJoinEvt = igfsCtx.kernalContext().discovery().localJoinEvent(); checkLaunchCoordinator(locJoinEvt); } }
/**
 * Get compute facade for client tasks. Lazily creates and caches a compute
 * instance over the IGFS metadata data nodes.
 *
 * @return Compute facade.
 */
private IgniteCompute clientCompute() {
    assert client;

    IgniteCompute res = cliCompute;

    if (res != null)
        return res;

    IgniteEx ignite = igfsCtx.kernalContext().grid();

    ClusterGroup grp = ignite.cluster().forIgfsMetadataDataNodes(cfg.getName(), metaCacheName);

    res = ignite.compute(grp);

    // Benign race: concurrent callers may each build an instance; last write wins.
    cliCompute = res;

    assert res != null;

    return res;
}
/**
 * Updates IGFS metrics when the stream is closed: records written bytes/time,
 * decrements the open-for-write counter and fires a close-write event if recordable.
 */
protected void updateMetricsOnClose() {
    IgfsLocalMetrics locMetrics = igfsCtx.metrics();

    locMetrics.addWrittenBytesTime(bytes, time);
    locMetrics.decrementFilesOpenedForWrite();

    GridEventStorageManager evtMgr = igfsCtx.kernalContext().event();

    if (evtMgr.isRecordable(EVT_IGFS_FILE_CLOSED_WRITE))
        evtMgr.record(new IgfsEvent(path, igfsCtx.localNode(), EVT_IGFS_FILE_CLOSED_WRITE, bytes));
}
/**
 * Computes cluster-wide IGFS space by executing {@link IgfsGlobalSpaceTask}
 * on the grid and wrapping the resulting pair into an {@link IgfsStatus}.
 * NOTE(review): get1()/get2() presumably map to (used, total) space — confirm
 * against the IgfsStatus constructor.
 */
@Override public IgfsStatus call() throws Exception {
    IgniteBiTuple<Long, Long> space = igfsCtx.kernalContext().grid().compute().execute(
        new IgfsGlobalSpaceTask(name()), null);

    return new IgfsStatus(space.get1(), space.get2());
}
});
/** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { if (!igfsCtx.configuration().isFragmentizerEnabled()) return; // We care only about node leave and fail events. igfsCtx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() { @Override public void onEvent(Event evt) { assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED; DiscoveryEvent discoEvt = (DiscoveryEvent)evt; checkLaunchCoordinator(discoEvt); } }, EVT_NODE_LEFT, EVT_NODE_FAILED); fragmentizerWorker = new FragmentizerWorker(); String igfsName = igfsCtx.configuration().getName(); topic = F.isEmpty(igfsName) ? TOPIC_IGFS : TOPIC_IGFS.topic(igfsName); igfsCtx.kernalContext().io().addMessageListener(topic, fragmentizerWorker); new IgniteThread(fragmentizerWorker).start(); }
/**
 * Gets the meta cache instance for the given IGFS instance.
 *
 * @param igfs The IGFS instance.
 * @return The meta cache.
 */
protected static GridCacheAdapter<IgniteUuid, IgfsEntryInfo> getMetaCache(IgniteFileSystem igfs) {
    // This is the META cache name (local was previously misnamed "dataCacheName",
    // a copy-paste from getDataCache; behavior is unchanged).
    String metaCacheName = igfs.configuration().getMetaCacheConfiguration().getName();

    IgniteEx igniteEx = ((IgfsEx)igfs).context().kernalContext().grid();

    return ((IgniteKernal)igniteEx).internalCache(metaCacheName);
}
/**
 * Gets the data cache instance for this IGFS instance.
 *
 * @param igfs The IGFS instance.
 * @return The data cache.
 */
protected static GridCacheAdapter<IgfsBlockKey, byte[]> getDataCache(IgniteFileSystem igfs) {
    String dataCacheName = igfs.configuration().getDataCacheConfiguration().getName();

    IgniteEx igniteEx = ((IgfsEx)igfs).context().kernalContext().grid();

    return ((IgniteKernal)igniteEx).internalCache(dataCacheName);
}
/**
 * Creates new instance of explicit data streamer for the IGFS data cache,
 * applying per-node buffering settings from the file system configuration.
 *
 * @return New instance of data streamer.
 */
private IgniteDataStreamer<IgfsBlockKey, byte[]> dataStreamer() {
    FileSystemConfiguration fsCfg = igfsCtx.configuration();

    IgniteDataStreamer<IgfsBlockKey, byte[]> streamer =
        igfsCtx.kernalContext().<IgfsBlockKey, byte[]>dataStream().dataStreamer(dataCachePrj.name());

    // Non-positive values mean "keep the streamer's defaults".
    if (fsCfg.getPerNodeBatchSize() > 0)
        streamer.perNodeBufferSize(fsCfg.getPerNodeBatchSize());

    if (fsCfg.getPerNodeParallelBatchCount() > 0)
        streamer.perNodeParallelOperations(fsCfg.getPerNodeParallelBatchCount());

    streamer.receiver(DataStreamerCacheUpdaters.<IgfsBlockKey, byte[]>batchedSorted());

    return streamer;
}