/** Per-response metadata captured for the REST layer: the caller-supplied
 *  redirect target, elapsed time and completion status, stamped with the
 *  cloud name and the address of the node answering the request. */
public ResponseInfo(String redirect_url, long time, Status status) {
  this.redirect_url = redirect_url;
  this.time         = time;
  this.status       = status;
  this.h2o  = H2O.NAME;            // name of the cloud (cluster)
  this.node = H2O.SELF.toString(); // address of this (answering) node
} }
/** Snapshot of task state as seen from this node: locally pending tasks plus
 *  a per-remote-node table of outstanding tasks, tagged with our address. */
public NodeTaskInfo(TaskInfo [] pending, TaskInfo[][] ts) {
  _pending = pending;              // tasks queued on this node
  _remotes = ts;                   // ts[node][task]: tasks tracked per remote node
  _node    = H2O.SELF.toString();  // reporting node's address
} }
// Human-readable address of the H2O node packed into this event; a null
// packed node means the packet was multicast rather than point-to-point.
public final String addrString() { return _packh2o==null ? "multicast" : _packh2o.toString(); }
// NOTE(review): body of ioflavor() continues beyond this excerpt.
public final String ioflavor() {
// Renders a node-address JSON element as an HTML link to that node's
// StoreView page.
@Override public String elementToString(JsonElement element, String contextName) {
  final String addr = element.getAsString();
  // Our own address links to the local StoreView directly (relative URL).
  if( addr.equals(H2O.SELF.toString()) )
    return "<a href='StoreView.html'>" + addr + "</a>";
  // Remote address: InetAddress-style strings carry a leading '/'; strip it
  // before building the absolute URL, but keep the original as link text.
  final String host = addr.startsWith("/") ? addr.substring(1) : addr;
  return "<a href='http://" + host + "/StoreView.html'>" + addr + "</a>";
} }
topDir + File.separator + "node" + i + H2O.CLOUD._memary[i].toString().replace(':', '_').replace('/', '_') + ".zip"; ZipEntry ze = new ZipEntry(filename);
// Builds a 256-bin byte histogram over one chunk's raw bytes. Doubles as a
// fault-injection point for distributed-exception tests: it throws when
// running on the node whose address matches _throwAt.
@SuppressWarnings("divzero")
@Override public void map( Chunk chk ) {
  _x = new int[256]; // One-time set histogram array
  byte[] bits = chk._mem; // Compute local histogram
  for( byte b : bits ) _x[b&0xFF]++;
  // Deliberate failure on the designated node; message "test" is asserted on
  // by the callers of this task.
  if(H2O.SELF.toString().equals(_throwAt)) throw new RuntimeException("test");
} // ADD together all results
sb.append("/* Java code is too large to display, download it directly.\n"); sb.append(" To obtain the code please invoke in your terminal:\n"); sb.append(" curl http:/").append(H2O.SELF.toString()).append("/h2o-model.jar > h2o-model.jar\n"); sb.append(" curl http:/").append(H2O.SELF.toString()).append("/2/").append(this.getClass().getSimpleName()).append("View.java?_modelKey=").append(_key).append(" > ").append(modelName).append(".java\n"); sb.append(" javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").append(modelName).append(".java\n"); if (GEN_BENCHMARK_CODE)
sb.append("/* Java code is too large to display, download it directly.\n"); sb.append(" To obtain the code please invoke in your terminal:\n"); sb.append(" curl http:/").append(H2O.SELF.toString()).append("/h2o-model.jar > h2o-model.jar\n"); sb.append(" curl http:/").append(H2O.SELF.toString()).append("/2/").append(this.getClass().getSimpleName()).append("View.java?_modelKey=").append(_key).append(" > ").append(modelName).append(".java\n"); sb.append(" javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").append(modelName).append(".java\n"); sb.append("*/");
// Collects a JVM stack dump from every node in the cloud, packages the
// per-node traces for display, and mirrors them into the debug log.
@Override public Response serve() {
  // Distributed call: one trace string per cloud member, index-aligned with
  // H2O.CLOUD._memary.
  String traces[] = new JStackCollectorTask().invokeOnAllNodes()._result;
  nodes = new StackSummary[H2O.CLOUD.size()];
  for( int i=0; i<nodes.length; i++ )
    nodes[i] = new StackSummary(H2O.CLOUD._memary[i].toString(),traces[i]);
  node_name = H2O.SELF.toString();
  cloud_name = H2O.NAME;
  // Fresh DateFormat per call, so no shared-instance thread-safety issue here.
  time = DateFormat.getInstance().format(new Date());
  for( int i=0; i<nodes.length; i++ )
    Log.debug(Log.Tag.Sys.WATER,nodes[i].name,nodes[i].traces);
  return Response.done(this);
}
// Gathers stack-sample profiles (to the configured depth) from every node in
// the cloud, records a summary per node, and logs counts with their stacks.
@Override public void execImpl() {
  // Distributed call: one profile per cloud member, index-aligned with
  // H2O.CLOUD._memary.
  ProfileCollectorTask.NodeProfile profiles[] = new ProfileCollectorTask(depth).invokeOnAllNodes()._result;
  nodes = new ProfileSummary[H2O.CLOUD.size()];
  for( int i=0; i<nodes.length; i++ )
    nodes[i] = new ProfileSummary(H2O.CLOUD._memary[i].toString(),profiles[i]);
  node_name = H2O.SELF.toString();
  cloud_name = H2O.NAME;
  // Fresh DateFormat per call, so no shared-instance thread-safety issue here.
  time = DateFormat.getInstance().format(new Date());
  // Dump each node's sample counts alongside the corresponding stack traces.
  for( int i=0; i<nodes.length; i++ ) {
    Log.info(nodes[i].name);
    for (int j = 0; j < nodes[i].profile.counts.length; ++j) {
      Log.info(nodes[i].profile.counts[j]);
      Log.info(nodes[i].profile.stacktraces[j]);
    }
  }
}
public String toString() { int udp_type = (int) (dataLo() & 0xff); // First byte is UDP packet type UDP.udp udpType = UDP.getUdp(udp_type); String operation = isSend() ? " SEND " : " RECV "; String host1 = addrString(); String host2 = recoH2O().toString(); String networkPart = isSend() ? (host2 + " -> " + host1) : (host1 + " -> " + host2); return "Node(" + _nodeId + ": " + ns() + ") " + udpType.toString() + operation + networkPart + (isDropped()?" DROPPED ":"") + ", data = '" + Long.toHexString(this.dataLo()) + ',' + Long.toHexString(this.dataHi()) + "'"; }
// Rewrites each chunk's local enum (categorical) codes into the global
// domain using the per-chunk translation tables emap[col][localCode].
@Override public void map(Chunk [] chks){
  int[][] emap = emap(_chunk2Enum[chks[0].cidx()]);
  final int cidx = chks[0].cidx();
  for(int i = 0; i < chks.length; ++i) {
    Chunk chk = chks[i];
    if(_gDomain[i] == null) // killed, replace with all NAs
      DKV.put(chk._vec.chunkKey(chk.cidx()),new C0DChunk(Double.NaN,chk._len));
    else for( int j = 0; j < chk._len; ++j){
      if( chk.isNA0(j) )continue;
      long l = chk.at80(j);
      // Local code outside the translation table: hand off to the reporter.
      // NOTE(review): presumably reportBrokenEnum throws; if it returned
      // normally, the emap[i][(int)l] access below would still index out of
      // bounds — confirm against its body.
      if (l < 0 || l >= emap[i].length)
        reportBrokenEnum(chk, i, j, l, emap);
      // A negative mapping means the value was never folded into the global
      // domain — fatal inconsistency, identify the node/col/row in the message.
      if(emap[i][(int)l] < 0)
        throw new RuntimeException(H2O.SELF.toString() + ": missing enum at col:" + i + ", line: " + j + ", val = " + l + ", chunk=" + chk.getClass().getSimpleName());
      chk.set0(j, emap[i][(int)l]);
    }
    chk.close(cidx, _fs);
  }
}
// NOTE(review): body of reportBrokenEnum continues beyond this excerpt.
private void reportBrokenEnum( Chunk chk, int i, int j, long l, int[][] emap ) {
resJson.addProperty(JSON_SELF, H2O.SELF.toString()); cloudListJson.add(cloudJson); cloudJson.addProperty(JSON_CLOUD, TimeLine.CLOUD._memary[i].toString()); cloudJson.addProperty(JSON_SENDS, sends[i]); cloudJson.addProperty(JSON_RECVS, recvs[i]); eventJson.addProperty(JSON_SEND, h2o.toString()); String recv = event.packH2O() == null ? "multicast" : event.packH2O().toString(); eventJson.addProperty(JSON_RECV, recv); } else { eventJson.addProperty(JSON_SEND, event.packH2O().toString()); eventJson.addProperty(JSON_RECV, h2o.toString()); if( event.isDropped() ) eventJson.addProperty(JSON_DROP, "1");
// Verifies that an exception thrown inside map() on each remote node in turn
// surfaces locally as a RuntimeException from a blocking doAll() invoke.
@Test public void testInvokeThrow() {
  File file = find_test_file("target/h2o.jar");
  Key h2okey = load_test_file(file);
  NFSFileVec nfs = DKV.get(h2okey).get();
  try {
    // Exercise the failure path once per cloud member.
    for(int i = 0; i < H2O.CLOUD._memary.length; ++i){
      ByteHistoThrow bh = new ByteHistoThrow();
      bh._throwAt = H2O.CLOUD._memary[i].toString(); // fault-inject on node i
      try {
        bh.doAll(nfs); // invoke should throw DistributedException wrapped up in RunTimeException
        fail("should've thrown");
      } catch(RuntimeException e){
        // Message "test" comes from the injected throw in map().
        assertTrue(e.getMessage().contains("test"));
      } catch(Throwable ex){
        ex.printStackTrace();
        fail("Expected RuntimeException, got " + ex.toString());
      }
    }
  } finally {
    // currently canceled RPC calls do not properly wait for all other nodes...
    // so once a map() call fails, other map calls can lazily load data after we call delete()
    try { Thread.sleep(100); } catch( InterruptedException ignore ) {}
    Lockable.delete(h2okey);
  }
}
assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only assert !ab.hasTCP():"ERROR: got tcp resend with existing in-progress task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only assert !ab.hasTCP():"ERROR: got tcp with existing task #, FROM " + ab._h2o.toString() + " AB: " + UDP.printx16(lo,hi); // All the resends should be UDP only old.resend_ack();
// Same scenario as testInvokeThrow, but through the asynchronous path:
// dfork().get() must surface the remote map() failure as ExecutionException.
@Test public void testGetThrow() {
  File file = find_test_file("target/h2o.jar");
  Key h2okey = load_test_file(file);
  NFSFileVec nfs = DKV.get(h2okey).get();
  try {
    // Exercise the failure path once per cloud member.
    for(int i = 0; i < H2O.CLOUD._memary.length; ++i){
      ByteHistoThrow bh = new ByteHistoThrow();
      bh._throwAt = H2O.CLOUD._memary[i].toString(); // fault-inject on node i
      try {
        bh.dfork(nfs).get(); // invoke should throw DistributedException wrapped up in RunTimeException
        fail("should've thrown");
      } catch(ExecutionException e){
        // Message "test" comes from the injected throw in map().
        assertTrue(e.getMessage().contains("test"));
      } catch(Throwable ex){
        ex.printStackTrace();
        fail("Expected ExecutionException, got " + ex.toString());
      }
    }
  } finally {
    // currently canceled RPC calls do not properly wait for all other nodes...
    // so once a map() call fails, other map calls can lazily load data after we call delete()
    try { Thread.sleep(100); } catch( InterruptedException ignore ) {}
    Lockable.delete(h2okey);
  }
}
for(int i = 0; i < H2O.CLOUD._memary.length; ++i){ ByteHistoThrow bh = new ByteHistoThrow(); bh._throwAt = H2O.CLOUD._memary[i].toString(); final boolean [] ok = new boolean[]{false}; try {
/** Returns the response system json. That is the response type, time,
 * h2o basics and other automatic stuff.
 * @return JsonObject carrying status, cloud name, node address and request
 *         time, plus redirect or poll-progress fields when the status
 *         requires them. */
protected JsonObject responseToJson() {
  JsonObject resp = new JsonObject();
  resp.addProperty(STATUS,_status.toString());
  resp.addProperty(JSON_H2O, H2O.NAME);
  resp.addProperty(NODE, H2O.SELF.toString());
  resp.addProperty(REQUEST_TIME, _time);
  switch (_status) {
    // Terminal states carry no extra payload.
    case done: case error: break;
    case redirect:
      resp.addProperty(REDIRECT,_redirectName);
      if (_redirectArgs != null) resp.add(REDIRECT_ARGS,_redirectArgs);
      break;
    case poll:
      resp.addProperty(PROGRESS, _pollProgress);
      resp.addProperty(PROGRESS_TOTAL, _pollProgressElements);
      break;
    // Unreachable unless a new Status value is added without updating this switch.
    default: assert(false): "Unknown response type "+_status.toString();
  }
  return resp;
}
sb.p("// mkdir tmpdir").nl(); sb.p("// cd tmpdir").nl(); sb.p("// curl http:/").p(H2O.SELF.toString()).p("/h2o-model.jar > h2o-model.jar").nl(); sb.p("// curl http:/").p(H2O.SELF.toString()).p("/2/").p(this.getClass().getSimpleName()).p("View.java?_modelKey=").pobj(_key).p(" > ").p(modelName).p(".java").nl(); sb.p("// javac -cp h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m ").p(modelName).p(".java").nl(); if (GEN_BENCHMARK_CODE)
result.addProperty(NUM_KEYS, len-offset); result.addProperty(CLOUD_NAME, H2O.NAME); result.addProperty(NODE_NAME, H2O.SELF.toString()); Response r = Response.done(result); r.addHeader(