/**
 * Constructor.
 * <p>
 * Creates the bind worker thread named "bind-worker" for the grid of the
 * owning GGFS context; logging goes to the kernal context logger.
 */
private BindWorker() {
    super(ggfsCtx.kernalContext().gridName(), "bind-worker", ggfsCtx.kernalContext().log());
}
/**
 * Creates accept worker.
 * <p>
 * Worker thread is named "ggfs-accept-worker" and bound to the grid of the
 * owning GGFS context. NOTE(review): this worker passes an enclosing
 * {@code log} field to the superclass rather than the kernal context logger
 * used by sibling workers — presumably intentional, confirm.
 */
protected AcceptWorker() {
    super(ggfsCtx.kernalContext().gridName(), "ggfs-accept-worker", log);
}
/**
 * Constructor.
 * <p>
 * Creates the fragmentizer worker thread named "fragmentizer-worker" for the
 * grid of the owning GGFS context; logging goes to the kernal context logger.
 */
protected FragmentizerWorker() {
    super(ggfsCtx.kernalContext().gridName(), "fragmentizer-worker", ggfsCtx.kernalContext().log());
}
/**
 * Constructor.
 * <p>
 * Creates the bind worker thread named "bind-worker" for the grid of the
 * owning GGFS context; logging goes to the kernal context logger.
 */
private BindWorker() {
    super(ggfsCtx.kernalContext().gridName(), "bind-worker", ggfsCtx.kernalContext().log());
}
/**
 * Creates accept worker.
 * <p>
 * Worker thread is named "ggfs-accept-worker" and bound to the grid of the
 * owning GGFS context. NOTE(review): this worker passes an enclosing
 * {@code log} field to the superclass rather than the kernal context logger
 * used by sibling workers — presumably intentional, confirm.
 */
protected AcceptWorker() {
    super(ggfsCtx.kernalContext().gridName(), "ggfs-accept-worker", log);
}
/**
 * Constructor.
 * <p>
 * Registers this coordinator as a local listener for node-left and
 * node-failed discovery events and as a message listener on {@code topic}.
 * <p>
 * NOTE(review): {@code this} escapes to the event and IO managers before the
 * constructor completes; this is safe only if no listener can fire until
 * construction finishes — confirm against the kernal lifecycle.
 */
protected FragmentizerCoordinator() {
    super(ggfsCtx.kernalContext().gridName(), "fragmentizer-coordinator", ggfsCtx.kernalContext().log());

    ggfsCtx.kernalContext().event().addLocalEventListener(this, EVT_NODE_LEFT, EVT_NODE_FAILED);
    ggfsCtx.kernalContext().io().addMessageListener(topic, this);
}
/**
 * Gracefully stops worker by adding STOP_INFO to queue.
 * <p>
 * Enqueues a ({@link GridFutureAdapter}, {@code stopInfo}) pair onto
 * {@code delReqs}; presumably the consuming worker treats {@code stopInfo}
 * as a stop marker — confirm against the worker loop.
 */
private void stop() {
    delReqs.offer(F.t(new GridFutureAdapter<>(ggfsCtx.kernalContext()), stopInfo));
}
/**
 * Gracefully stops worker by adding STOP_INFO to queue.
 * <p>
 * Enqueues a ({@link GridFutureAdapter}, {@code stopInfo}) pair onto
 * {@code delReqs}; presumably the consuming worker treats {@code stopInfo}
 * as a stop marker — confirm against the worker loop.
 */
private void stop() {
    delReqs.offer(F.t(new GridFutureAdapter<>(ggfsCtx.kernalContext()), stopInfo));
}
/**
 * Sends a GGFS communication message to the given node, marshalling it first
 * unless the target is the local node.
 *
 * @param nodeId Node ID.
 * @param topic Topic.
 * @param msg Message.
 * @param plc Policy.
 * @throws GridException In case of error.
 */
public void send(UUID nodeId, Object topic, GridGgfsCommunicationMessage msg, GridIoPolicy plc)
    throws GridException {
    boolean locDelivery = kernalContext().localNodeId().equals(nodeId);

    // Only remote messages cross the wire, so only they need marshalling.
    if (!locDelivery)
        msg.prepareMarshal(kernalContext().config().getMarshaller());

    kernalContext().io().send(nodeId, topic, msg, plc);
}
/**
 * Lazily resolves and caches the local grid node.
 *
 * @return Local node.
 */
private GridNode localNode() {
    GridNode node = locNode;

    // Resolve once via discovery and remember the result.
    if (node == null) {
        node = ggfsCtx.kernalContext().discovery().localNode();

        locNode = node;
    }

    return node;
}
/**
 * Lazily resolves and caches the local grid node.
 *
 * @return Local node.
 */
private GridNode localNode() {
    GridNode node = locNode;

    // Resolve once via discovery and remember the result.
    if (node == null) {
        node = ggfsCtx.kernalContext().discovery().localNode();

        locNode = node;
    }

    return node;
}
/** {@inheritDoc} */
@Override public <T, R> GridFuture<R> execute(GridGgfsTask<T, R> task, @Nullable GridGgfsRecordResolver rslvr,
    Collection<GridGgfsPath> paths, boolean skipNonExistentFiles, long maxRangeLen, @Nullable T arg) {
    // Bundle all task parameters into a single arguments object and delegate
    // execution to the kernal task processor.
    GridFuture<R> fut = ggfsCtx.kernalContext().task().execute(task,
        new GridGgfsTaskArgsImpl<>(cfg.getName(), paths, rslvr, skipNonExistentFiles, maxRangeLen, arg));

    return fut;
}
/** {@inheritDoc} */
@Override public <T, R> GridFuture<R> execute(Class<? extends GridGgfsTask<T, R>> taskCls,
    @Nullable GridGgfsRecordResolver rslvr, Collection<GridGgfsPath> paths, boolean skipNonExistentFiles,
    long maxRangeSize, @Nullable T arg) {
    // The cast merely narrows the wildcard to the exact class-token type the
    // task processor expects; suppress the unchecked warning on the smallest
    // possible scope (this local) rather than leaving a bare unchecked cast.
    @SuppressWarnings("unchecked")
    Class<GridGgfsTask<T, R>> cls = (Class<GridGgfsTask<T, R>>)taskCls;

    return ggfsCtx.kernalContext().task().execute(cls,
        new GridGgfsTaskArgsImpl<>(cfg.getName(), paths, rslvr, skipNonExistentFiles, maxRangeSize, arg));
}
/** {@inheritDoc} */
@Override public <T, R> GridFuture<R> execute(Class<? extends GridGgfsTask<T, R>> taskCls,
    @Nullable GridGgfsRecordResolver rslvr, Collection<GridGgfsPath> paths, boolean skipNonExistentFiles,
    long maxRangeSize, @Nullable T arg) {
    // The cast merely narrows the wildcard to the exact class-token type the
    // task processor expects; suppress the unchecked warning on the smallest
    // possible scope (this local) rather than leaving a bare unchecked cast.
    @SuppressWarnings("unchecked")
    Class<GridGgfsTask<T, R>> cls = (Class<GridGgfsTask<T, R>>)taskCls;

    return ggfsCtx.kernalContext().task().execute(cls,
        new GridGgfsTaskArgsImpl<>(cfg.getName(), paths, rslvr, skipNonExistentFiles, maxRangeSize, arg));
}
/**
 * Creates client worker.
 *
 * @param idx Worker index for worker thread naming.
 * @param endpoint Connected client endpoint.
 * @throws GridException If endpoint output stream cannot be obtained.
 */
protected ClientWorker(GridIpcEndpoint endpoint, int idx) throws GridException {
    super(ggfsCtx.kernalContext().gridName(), "ggfs-client-worker-" + idx, log);

    this.endpoint = endpoint;

    // Fresh session state per connected client.
    ses = new GridGgfsClientSession();

    // Buffered, GGFS-framed view over the endpoint output stream; obtaining
    // the stream is the operation that may throw GridException.
    out = new GridGgfsDataOutputStream(new BufferedOutputStream(endpoint.outputStream()));
}
/**
 * Completes this operation with a wrapped exception describing a local
 * write failure, distinguishing out-of-space from other write errors.
 *
 * @param e Error.
 */
private void onLocalError(GridException e) {
    String msg = e instanceof GridGgfsOutOfSpaceException ?
        "Failed to write data (not enough space on node): " + ggfsCtx.kernalContext().localNodeId() :
        "Failed to wait for write completion (write failed on node): " + ggfsCtx.kernalContext().localNodeId();

    // Keep the original error as the cause of the wrapping exception.
    onDone(new GridException(msg, e));
}
/**
 * Constructs GGFS IPC handler.
 *
 * @param ggfsCtx GGFS context to handle requests for; must not be {@code null}.
 * @param mgmt Management flag, stored as-is (semantics not visible here).
 */
GridGgfsIpcHandler(GridGgfsContext ggfsCtx, boolean mgmt) {
    assert ggfsCtx != null;

    this.mgmt = mgmt;

    ctx = ggfsCtx.kernalContext();

    ggfs = ggfsCtx.ggfs();

    // Keep buffer size multiple of block size so no extra byte array copies is performed.
    bufSize = ggfsCtx.configuration().getBlockSize() * 2;

    log = ctx.log(GridGgfsIpcHandler.class);
}
/** {@inheritDoc} */
@Override protected void start0() throws GridException {
    cfg = ggfsCtx.configuration();

    metaCache = ggfsCtx.kernalContext().cache().cache(cfg.getMetaCacheName());

    // Metadata operations rely on transactions, so a non-transactional meta
    // cache is a configuration error.
    if (metaCache.configuration().getAtomicityMode() != TRANSACTIONAL)
        throw new GridException("Meta cache should be transactional: " + cfg.getMetaCacheName());

    evts = ggfsCtx.kernalContext().event();

    sampling = new GridGgfsSamplingKey(cfg.getName());

    // NOTE(review): this assert is ineffective — metaCache was already
    // dereferenced above, so a null would have thrown before reaching here.
    assert metaCache != null;

    id2InfoPrj = (GridCacheProjectionEx<GridUuid, GridGgfsFileInfo>)metaCache.<GridUuid, GridGgfsFileInfo>cache();

    log = ggfsCtx.kernalContext().log(GridGgfsMetaManager.class);
}
/** {@inheritDoc} */ @Override protected void onKernalStart0() throws GridException { if (ggfsCtx.configuration().isFragmentizerEnabled()) { // Check at startup if this node is a fragmentizer coordinator. GridDiscoveryEvent locJoinEvt = ggfsCtx.kernalContext().discovery().localJoinEvent(); checkLaunchCoordinator(locJoinEvt); } }
/** {@inheritDoc} */ @Override protected void onKernalStart0() throws GridException { if (ggfsCtx.configuration().isFragmentizerEnabled()) { // Check at startup if this node is a fragmentizer coordinator. GridDiscoveryEvent locJoinEvt = ggfsCtx.kernalContext().discovery().localJoinEvent(); checkLaunchCoordinator(locJoinEvt); } }