/**
 * Returns true if the given tree was produced by this (local) node.
 * <p>
 * Note: chunkKey is the key stored at this local node. Only meaningful
 * when computing out-of-bag error estimates ({@code _computeOOB}).
 *
 * @param treeProducerId cloud index of the node that built the tree
 * @return whether the producer index matches this node's cloud index
 */
private boolean isLocalTree(byte treeProducerId) {
  assert _computeOOB : "Calling this method makes sense only for oobee";
  // Compare the producer's cloud index directly against our own.
  return H2O.SELF.index() == treeProducerId;
}
/**
 * Builds response metadata for this node: cloud name, node address,
 * optional redirect target, elapsed time, and completion status.
 *
 * @param redirect_url URL the client should be redirected to (may be null)
 * @param time         elapsed time associated with this response
 * @param status       completion status of the request
 */
public ResponseInfo(String redirect_url, long time, Status status) {
  // Assignments are independent; order here is not significant.
  this.status       = status;
  this.time         = time;
  this.redirect_url = redirect_url;
  this.node         = H2O.SELF.toString();
  this.h2o          = H2O.NAME;
}
}
/**
 * Interns an H2ONode for the given key, returning the canonical instance.
 * Lock-free fast path; on a race, the loser's freshly-built node is discarded
 * and the winner's instance is returned. The statement order below is
 * load-bearing: publish via putIfAbsent FIRST, then register in IDX.
 */
public static final H2ONode intern( H2Okey key ) {
  // Fast path: already interned.
  H2ONode h2o = INTERN.get(key);
  if( h2o != null ) return h2o;
  // Reserve a unique index before attempting to publish.
  final int idx = UNIQUE.getAndIncrement();
  h2o = new H2ONode(key,idx);
  // Atomic publish; if another thread won the race, use its instance.
  // NOTE: the reserved idx is simply abandoned in that case.
  H2ONode old = INTERN.putIfAbsent(key,h2o);
  if( old != null ) return old;
  // Register in the index table, growing it (doubling) as needed.
  // Synchronized on H2O.class to serialize IDX resizes.
  synchronized(H2O.class) {
    while( idx >= IDX.length ) IDX = Arrays.copyOf(IDX,IDX.length<<1);
    IDX[idx] = h2o;
  }
  return h2o;
}
/** Convenience overload: interns by IP address and port. */
public static final H2ONode intern( InetAddress ip, int port ) { return intern(new H2Okey(ip,port)); }
/**
 * Gathers task information for this node: per-remote task snapshots from every
 * cloud member, plus this node's own pending RPCs, then completes the task.
 */
@Override public void lcompute() {
  final int cloudSize = H2O.CLOUD.size();
  _infos = new NodeTaskInfo[cloudSize];
  // Snapshot the current task info of every node in the cloud.
  TaskInfo[][] remoteInfos = new TaskInfo[cloudSize][];
  int node = 0;
  for( H2ONode n : H2O.CLOUD._memary ) {
    remoteInfos[node++] = n.currentTasksInfo();
  }
  // Convert this node's outstanding RPCs into TaskInfo records.
  RPC[] rpcs = UDPTimeOutThread.pendingRPCs();
  TaskInfo[] pendingInfos = new TaskInfo[rpcs.length];
  for( int j = 0; j < rpcs.length; ++j ) {
    RPC rpc = rpcs[j];
    pendingInfos[j] = new TaskInfo(rpc.task(), rpc.taskNum(), rpc.target().index(),
                                   rpc.isDone() ? task_status.DONE : task_status.CMP,
                                   rpc._callCnt);
  }
  _infos[H2O.SELF.index()] = new NodeTaskInfo(pendingInfos, remoteInfos);
  tryComplete();
}
}
// Periodic (roughly every 300 heartbeats, offset per-node by hashCode so nodes
// don't all benchmark at once) collection of expensive health metrics.
// NOTE(review): fragment of a larger heartbeat loop — braces are unbalanced here,
// and the "regained contact" lines appear to belong to a separate else-branch in
// the original file; verify against the full source.
if( (counter+Math.abs(H2O.SELF.hashCode())) % 300 == 0) {
  // Micro-benchmarks: FLOPS (Linpack) and memory bandwidth, bounded by allowed CPUs.
  hb._gflops = Linpack.run(hb._cpus_allowed);
  hb._membw = MemoryBandwidth.run(hb._cpus_allowed);
  // Total outstanding RPC tasks across all cloud members.
  int rpcs = 0;
  for( H2ONode h2o : cloud._memary ) rpcs += h2o.taskSize();
  hb._rpcs = (char)rpcs;  // narrowed to char — presumably counts stay small; confirm
  if( delta > SUSPECT ) {// We suspect this Node has taken a dirt nap
    // Announce suspicion only once per outage (guarded by the flag).
    if( !h2o._announcedLostContact ) {
      Paxos.print("hart: announce suspect node",cloud._memary,h2o.toString());
      h2o._announcedLostContact = true;
      // NOTE(review): the following lines look like the recovery branch,
      // likely under an else in the original — extraction garbling suspected.
      Paxos.print("hart: regained contact with node",cloud._memary,h2o.toString());
      h2o._announcedLostContact = false;
// Handling of an incoming (possibly resent) task request.
// NOTE(review): this fragment is garbled — the same "resends should be UDP only"
// assert appears four times and a bare return; interrupts the flow; compare with
// the full original file before trusting control flow here.
RPCCall old = ab._h2o.has_task(task);  // do we already know this task number?
assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
return;
// Atomically record the task so concurrent duplicates don't both do the work.
RPCCall rpc2 = ab._h2o.record_task(rpc);
if( rpc2==null ) { // Atomically insert (to avoid double-work)
  if( (rpc._dt instanceof DRemoteTask || rpc._dt instanceof MRTask2) && rpc._dt.logVerbose() )
    assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
  assert !ab.hasTCP():"ERROR: got tcp resend with existing in-progress task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
  assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only
  // Re-send the acknowledgement for the already-known task.
  old.resend_ack();
void drainClose() { if( isClosed() ) return; // Already closed assert _h2o != null || _chan != null; // Byte-array backed should not be closed if( _chan != null ) { // Channel assumed sick from prior IOException ByteChannel chan = _chan; // Read before closing try { chan.close(); } catch( IOException ignore ) {} // Silently close _chan = null; // No channel now! if( !_read && chan instanceof SocketChannel) _h2o.freeTCPSocket((SocketChannel)chan); // Recycle writable TCP channel } restorePriority(); // And if we raised priority, lower it back bbFree(); _time_close_ms = System.currentTimeMillis(); TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections assert isClosed(); }
/**
 * Determines whether the given GPU id is available on the given node.
 * Answers locally when {@code node} is this node; otherwise dispatches a
 * blocking {@link HasGPUTask} RPC to the remote node.
 *
 * @param node   node to query
 * @param gpu_id GPU identifier to check
 * @return true when the GPU is available on that node
 */
static boolean hasGPU(H2ONode node, int gpu_id) {
  final boolean result;
  if (!H2O.SELF.equals(node)) {
    // Remote node: run the check over RPC and block for the answer.
    HasGPUTask task = new HasGPUTask(gpu_id);
    new RPC<>(node, task).call().get();
    result = task._hasGPU;
  } else {
    result = hasGPU(gpu_id); // local overload
  }
  Log.debug("Availability of GPU (id=" + gpu_id + ") on node " + node + ": " + result);
  return result;
}
// Fragment of a model-builder init(): delegate to the superclass first,
// then reject multi-node clusters with SSL enabled — XGBoost cannot run
// over SSL across nodes. (Braces are unbalanced; continuation not visible.)
super.init(expensive);
if (H2O.CLOUD.size() > 1) {
  if(H2O.SELF.getSecurityManager().securityEnabled) {
    throw new H2OIllegalArgumentException("Cannot run XGBoost on an SSL enabled cluster larger than 1 node. XGBoost does not support SSL encryption.");
// Write-side channels are still usable: return the TCP socket to the
// node's pool for reuse rather than discarding it.
if( !_read ) _h2o.freeTCPSocket((SocketChannel)_chan); // Recycle writable TCP channel
/**
 * Retrieves the serialized booster bytes. When the booster lives on this
 * node the raw array is read directly from the local updater; otherwise a
 * blocking {@link FetchBoosterTask} RPC fetches it from the booster node.
 *
 * @return raw serialized booster bytes
 */
public byte[] getBoosterBytes() {
  final H2ONode boosterNode = getBoosterNode();
  if (H2O.SELF.equals(boosterNode)) {
    // Local fast path: read straight out of this node's updater.
    return XGBoost.getRawArray(XGBoostUpdater.getUpdater(_modelKey).getBooster());
  }
  Log.debug("Booster will be retrieved from a remote node, node=" + boosterNode);
  FetchBoosterTask task = new FetchBoosterTask(_modelKey);
  return new RPC<>(boosterNode, task).call().get()._boosterBytes;
}
/**
 * Snapshot of task state for this node.
 *
 * @param pending RPCs still outstanding on this node
 * @param ts      per-remote-node task info snapshots
 */
public NodeTaskInfo(TaskInfo [] pending, TaskInfo[][] ts) {
  // Independent field assignments; order not significant.
  _pending = pending;
  _remotes = ts;
  _node    = H2O.SELF.toString();
}
}
/** Human-readable address of the packet's node, or "multicast" when there is no target node. */
public final String addrString() { return _packh2o==null ? "multicast" : _packh2o.toString(); }
/** Describes the I/O flavor of this event. (Body continues beyond this fragment.) */
public final String ioflavor() {
/**
 * Atomically resizes this node's local-forest slot in the stored SpeeDRF model
 * to hold {@code num_trees} tree keys.
 *
 * @param modelKey  key of the SpeeDRFModel in the distributed store
 * @param num_trees number of tree-key slots to allocate for this node
 */
static void updateRFModelLocalForests(Key modelKey, final int num_trees) {
  // Capture this node's cloud index outside the atomic update closure.
  final int selfIdx = H2O.SELF.index();
  new TAtomic<SpeeDRFModel>() {
    @Override public SpeeDRFModel atomic(SpeeDRFModel old) {
      if (old == null) return null;        // model vanished; nothing to update
      SpeeDRFModel updated = (SpeeDRFModel) old.clone();
      updated.local_forests[selfIdx] = new Key[num_trees];
      return updated;
    }
  }.invoke(modelKey);
}
/**
 * Renders a node-address JSON element as an HTML link to that node's
 * StoreView page. This node links locally; remote nodes link by address
 * (any leading '/' stripped from the host portion).
 */
@Override public String elementToString(JsonElement element, String contextName) {
  final String str = element.getAsString();
  // Our own address: relative link to the local StoreView.
  if( str.equals(H2O.SELF.toString()) )
    return "<a href='StoreView.html'>"+str+"</a>";
  // Remote node: absolute link; drop a leading '/' from the address.
  String host = str.startsWith("/") ? str.substring(1) : str;
  return "<a href='http://" + host + "/StoreView.html'>" + str + "</a>";
}
}
/**
 * Builds the hidden, node-homed key under which the given node stores its
 * local model info for this model.
 *
 * @param node node that homes the key
 * @return hidden system key named "&lt;model_id&gt;.node&lt;idx&gt;" pinned to {@code node}
 */
public Key localModelInfoKey(H2ONode node) {
  final String keyName = _model_id + ".node" + node.index();
  return Key.make(keyName, (byte) 1 /*replica factor*/, (byte) 31 /*hidden user-key*/, true, node);
}
// Tail of a per-node zip filename expression (assignment begins on an earlier
// line not visible here): "<topDir>/node<i><node-address>.zip", with ':' and
// '/' in the node address replaced by '_' so the name is filesystem-safe.
topDir + File.separator + "node" + i + H2O.CLOUD._memary[i].toString().replace(':', '_').replace('/', '_') + ".zip";
// Start a new zip entry for this node's file.
ZipEntry ze = new ZipEntry(filename);