/**
 * Returns true if the given tree was produced by this node.
 *
 * <p>Only meaningful when computing out-of-bag error estimates: each
 * node must skip trees it built itself.
 */
private boolean isLocalTree(byte treeProducerId) {
  assert _computeOOB : "Calling this method makes sense only for oobee";
  return H2O.SELF.index() == treeProducerId;
}
/**
 * Atomically resets this node's slot in the model's local forests to a
 * fresh, empty array sized for {@code num_trees} tree keys.
 */
static void updateRFModelLocalForests(Key modelKey, final int num_trees) {
  final int nodeIdx = H2O.SELF.index();
  new TAtomic<SpeeDRFModel>() {
    @Override public SpeeDRFModel atomic(SpeeDRFModel old) {
      if (old == null) return null;
      SpeeDRFModel updated = (SpeeDRFModel) old.clone();
      updated.local_forests[nodeIdx] = new Key[num_trees];
      return updated;
    }
  }.invoke(modelKey);
}
/**
 * Builds the (hidden) key under which the given node stores its local
 * model info, pinned to that node.
 */
public Key localModelInfoKey(H2ONode node) {
  final byte replicaFactor = (byte) 1;
  final byte hiddenUserKey = (byte) 31;
  return Key.make(_model_id + ".node" + node.index(), replicaFactor, hiddenUserKey, true, node);
}
/**
 * Atomically records this node's number of split features into the
 * model stored under {@code modelKey}.
 */
static void updateRFModel(Key modelKey, final int numSplitFeatures) {
  final int nodeIdx = H2O.SELF.index();
  new TAtomic<SpeeDRFModel>() {
    @Override public SpeeDRFModel atomic(SpeeDRFModel old) {
      if (old == null) return null;
      SpeeDRFModel updated = (SpeeDRFModel) old.clone();
      updated.node_split_features[nodeIdx] = numSplitFeatures;
      return updated;
    }
  }.invoke(modelKey);
}
/**
 * Atomically appends one tree (its key, data key, string form, and id)
 * produced on this node to the model stored under {@code model}.
 */
static void appendKey(Key model, final Key tKey, final Key dtKey, final String tString, final int tree_id) {
  final int nodeIdx = H2O.SELF.index();
  new TAtomic<SpeeDRFModel>() {
    @Override public SpeeDRFModel atomic(SpeeDRFModel old) {
      return old == null ? null : SpeeDRFModel.make(old, tKey, dtKey, nodeIdx, tString, tree_id);
    }
  }.invoke(model);
}
/**
 * Returns the NewChunk to append rows into: the shared {@code XNC}
 * when not gathering rows, otherwise a fresh NewChunk indexed by this
 * node's cloud position.
 */
private NewChunk makeNC() {
  // Idiomatic boolean test instead of the original "== false" comparison.
  return _gatherRows ? new NewChunk(null, H2O.SELF.index()) : XNC;
}
/**
 * Marks, in this node's column of {@code _ok}, each listed file that
 * exists locally with exactly the expected size.
 */
@Override public void lcompute() {
  _ok = new int[_files.length][H2O.CLOUD.size()];
  final int nodeIdx = H2O.SELF.index();
  for (int i = 0; i < _files.length; ++i) {
    final File candidate = new File(_files[i]);
    if (candidate.exists() && candidate.length() == _sizes[i]) {
      _ok[i][nodeIdx] = 1;
    }
  }
  tryComplete();
}
/**
 * Dumps every live thread's stack trace into a single string and
 * stores it in this node's slot of {@code _result}.
 */
@Override public void lcompute() {
  _result = new String[H2O.CLOUD.size()];
  StringBuilder dump = new StringBuilder();
  for (Entry<Thread, StackTraceElement[]> trace : Thread.getAllStackTraces().entrySet()) {
    append(dump, trace.getKey());
    append(dump, trace.getValue());
    dump.append('\n');
  }
  _result[H2O.SELF.index()] = dump.toString();
  tryComplete();
}
@Override public void lcompute() { _result = new byte[H2O.CLOUD._memary.length][]; int idx = H2O.SELF.index(); baos = new ByteArrayOutputStream(); ZipOutputStream zos = new ZipOutputStream(baos); try { zipDir(Log.LOG_DIR, zos); } catch (IOException e) { H2O.ignore(e); } finally { try { zos.close(); baos.close(); } catch (Exception xe) { // do nothing } byte[] arr = baos.toByteArray(); _result[idx] = arr; tryComplete(); } }
/**
 * Tallies, per chunk compression type, the count and total byte size
 * of the given chunks, and accumulates bytes into this node's slot of
 * {@code byte_size_per_node}.
 *
 * @throws RuntimeException via {@code H2O.unimpl()} if a chunk's class
 *         name matches no entry of {@code chunkTypes}.
 */
@Override public void map(Chunk[] cs) {
  chunk_counts = new long[chunkTypes.length];
  chunk_byte_sizes = new long[chunkTypes.length];
  byte_size_per_node = new long[H2O.CLOUD.size()];
  final int nodeIdx = H2O.SELF.index(); // loop-invariant: hoisted out of the scan
  for (Chunk c : cs) {
    final String name = c.getClass().getSimpleName(); // compute once per chunk, not per type
    boolean found = false;
    for (int j = 0; j < chunkTypes.length; ++j) {
      if (name.equals(chunkTypes[j] + "Chunk")) {
        found = true;
        chunk_counts[j]++;
        chunk_byte_sizes[j] += c.byteSize();
        byte_size_per_node[nodeIdx] += c.byteSize();
      }
    }
    if (!found) {
      throw H2O.unimpl();
    }
  }
}
@Override public void map(Key key) { _lEnums = new Enum[H2O.CLOUD.size()][]; if(MultiFileParseTask._enums.containsKey(_k)){ _lEnums[H2O.SELF.index()] = _gEnums = MultiFileParseTask._enums.get(_k); // if we are the original node (i.e. there will be no sending over // wire), we have to clone the enums not to share the same object // (causes problems when computing column domain and renumbering maps). if( H2O.SELF.index() == _homeNode ) { _gEnums = _gEnums.clone(); for(int i = 0; i < _gEnums.length; ++i) _gEnums[i] = _gEnums[i].clone(); } MultiFileParseTask._enums.remove(_k); } }
/**
 * Gathers task diagnostics: snapshots every cloud member's current
 * tasks plus this node's still-pending RPCs, and stores the combined
 * summary into this node's slot of _infos.
 */
@Override public void lcompute() {
  _infos = new NodeTaskInfo[H2O.CLOUD.size()];
  // One row of task snapshots per cloud member.
  TaskInfo [][] ts = new TaskInfo[H2O.CLOUD.size()][];
  int i = 0;
  for (H2ONode n : H2O.CLOUD._memary) ts[i++] = n.currentTasksInfo();
  // Wrap each outstanding RPC as a TaskInfo, flagged DONE or still computing.
  RPC [] pendingRPCs = UDPTimeOutThread.pendingRPCs();
  TaskInfo [] pending = new TaskInfo[pendingRPCs.length];
  for(int j = 0; j < pendingRPCs.length; ++j)
    pending[j] = new TaskInfo(pendingRPCs[j].task(),pendingRPCs[j].taskNum(),pendingRPCs[j].target().index(),
        pendingRPCs[j].isDone()? task_status.DONE:task_status.CMP,pendingRPCs[j]._callCnt);
  _infos[H2O.SELF.index()] = new NodeTaskInfo(pending,ts);
  tryComplete();
}
}
@Override public void lcompute() { int keysetSize = H2O.localKeySet().size(); int numNodes = H2O.CLOUD._memary.length; int nodeIdx = H2O.SELF.index(); Log.info("Removing "+keysetSize+" keys on this node; nodeIdx("+nodeIdx+") numNodes("+numNodes+")"); // Now remove all keys. Futures fs = new Futures(); for( Key key : H2O.localKeySet() ) DKV.remove(key, fs); fs.blockForPending(); Log.info("Keys remaining: "+H2O.store_size()); tryComplete(); }
/**
 * Blocking execution entry point: records the target frame, reserves
 * output vector keys if requested, and runs the distributed
 * computation across the whole cloud starting from this node.
 */
public final void exec( int outputs, Frame fr, boolean run_local){
  // Use first readable vector to gate home/not-home
  fr.checkCompatible();         // Check for compatible vectors
  // Reserve keys for the requested number of output vectors up front.
  if((_noutputs = outputs) > 0) _vid = fr.anyVec().group().reserveKeys(outputs);
  _fr = fr;                     // Record vectors to work on
  _nxx = (short)H2O.SELF.index(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud
  _run_local = run_local;       // Run locally by copying data, or run globally?
  setupLocal0();                // Local setup
  compute2();                   // Run the computation on the calling thread
}
private final RPC<T> remote_compute( int nlo, int nhi ) { // No remote work? if( !(nlo < nhi) ) return null; int node = addShift(nlo); assert node != H2O.SELF.index(); T rpc = clone(); rpc.setCompleter(null); rpc._nhi = (short)nhi; addToPendingCount(1); // Not complete until the RPC returns // Set self up as needing completion by this RPC: when the ACK comes back // we'll get a wakeup. return new RPC(H2O.CLOUD._memary[node], rpc).addCompleter(this).call(); }
/**
 * Runs the UDP-drop test locally when this node is the source node;
 * otherwise forwards a clone of the test to the source via RPC and
 * copies the results back into this instance on completion.
 */
@Override public void compute2() {
  if(_srcId == H2O.SELF.index()) {
    doTest();
    tryComplete();
  } else {
    _done = true;
    final UDPDropTester t = (UDPDropTester) clone();
    // The completer fires when the remote run finishes; copyOver pulls
    // the remote results into this local instance.
    new RPC(H2O.CLOUD._memary[_srcId], t).addCompleter(new H2OCountedCompleter(this) {
      @Override public void compute2() { }
      @Override public void onCompletion(CountedCompleter cc) { copyOver(t); }
    }).call();
  }
}
}
/**
 * Forks the task in strictly non-blocking fashion.
 *
 * Same functionality as dfork, but does not raise priority, so the
 * caller should *never* block on it.
 */
public final void asyncExec( int outputs, Frame fr, boolean run_local){
  // Use first readable vector to gate home/not-home
  fr.checkCompatible();         // Check for compatible vectors
  // Reserve keys for the requested number of output vectors up front.
  if((_noutputs = outputs) > 0) _vid = fr.anyVec().group().reserveKeys(outputs);
  _fr = fr;                     // Record vectors to work on
  _nxx = (short)H2O.SELF.index(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud
  _run_local = run_local;       // Run locally by copying data, or run globally?
  setupLocal0();                // Local setup
  H2O.submitTask(this);         // Begin normal execution on a FJ thread
}

/** Invokes the map/reduce computation over the given Frame. This call is
/**
 * Local portion of the second ddply pass: for every possible group,
 * closes this node's NewChunk of group rows into its assigned vector,
 * or stores an empty chunk so every node contributes a chunk per group.
 */
@Override public void lcompute() {
  // Claim (and drop) this node's pass-1 temporary results.
  ddplyPass1 p1 = ddplyPass1.PASS1TMP.remove(_p1key);
  Futures fs = new Futures();
  int cidx = H2O.SELF.index();   // this node's chunk index
  for( int i=0; i<_dss.length; i++ ) { // For all possible groups
    // Get the newchunk of local rows for a group
    Group g = new Group(_dss[i]);
    NewChunk nc = p1._groups == null ? null : p1._groups.get(g);
    if( nc != null && nc._len > 0 ) {
      // Fill in fields we punted on during construction
      nc._vec = _avs[i];         // Assign a proper vector
      nc.close(cidx,fs);         // Close & compress chunk
    } else {
      // All nodes have a chunk, even if its empty
      DKV.put(_avs[i].chunkKey(cidx), new C0LChunk(0,0),fs);
    }
  }
  fs.blockForPending();
  _p1key = null;                 // No need to return these
  _dss = null;
  tryComplete();
}

@Override public void reduce( ddplyPass2 p2 ) {