/** Block until it completes, even if run remotely */
public final T invoke( Key key ) {
  RPC<Atomic<T>> rpc = fork(key);
  return (T)(rpc == null ? this : rpc.get()); // Block for it
}
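// A minimal plain-JDK sketch (an analogy, not H2O's RPC API; all names below
// are illustrative assumptions) of the fork-then-block shape in invoke()
// above: fork the work, and if it ran inline the result is already in hand,
// otherwise block on the returned handle.
import java.util.concurrent.CompletableFuture;

class ForkThenBlockSketch {
  // Returns null when the work was done inline, else a handle to block on.
  static CompletableFuture<Integer> fork(int x, boolean runLocal) {
    if (runLocal) return null;                      // caller computes inline
    return CompletableFuture.supplyAsync(() -> x * 2);
  }

  static int invoke(int x, boolean runLocal) {
    CompletableFuture<Integer> f = fork(x, runLocal);
    return f == null ? x * 2 : f.join();            // Block for it
  }

  public static void main(String[] args) {
    System.out.println(invoke(21, false));          // prints 42
  }
}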
static public void write_barrier() {
  for( H2ONode h2o : H2O.CLOUD._memary )
    for( RPC rpc : h2o.tasks() )
      if( rpc._dt instanceof TaskPutKey || rpc._dt instanceof Atomic )
        rpc.get();
}
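// A plain-JDK sketch of the same barrier idea, assuming a Future-based
// analogue of H2O's per-node RPC queues: block on every still-pending write
// task so that all earlier writes have completed before proceeding.
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class WriteBarrierSketch {
  static void writeBarrier(List<Future<?>> pending) throws Exception {
    for (Future<?> f : pending)                     // Block for each pending write
      f.get();
  }

  public static void main(String[] args) throws Exception {
    ExecutorService ex = Executors.newFixedThreadPool(2);
    List<Future<?>> writes = List.of(
        ex.submit(() -> System.out.println("put key A")),
        ex.submit(() -> System.out.println("put key B")));
    writeBarrier(writes);                           // Returns only after both puts finish
    System.out.println("all writes done");
    ex.shutdown();
  }
}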
public static String fetchClazz(int id) {
  String clazz = RPC.call(H2O.CLOUD.leader(), new FetchClazz(id)).get()._clazz;
  assert clazz != null : "No class matching id "+id;
  return clazz;
}

@Override public void compute2() {
  _clazz = TypeMap.className(_id);
  tryComplete();
}
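// A plain-Java sketch (illustrative, not H2O's actual TypeMap) of the
// id<->name intern table that fetchClazz above and fetchId below consult on
// the cloud leader: a single authority assigns stable ids, so every node
// agrees on the id-to-class mapping.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class InternTableSketch {
  private final Map<String, Integer> _ids = new HashMap<>();
  private final List<String> _names = new ArrayList<>();

  // Leader-side: assign (or look up) the id for a class name.
  synchronized int onIce(String clazz) {
    Integer id = _ids.get(clazz);
    if (id != null) return id;
    _ids.put(clazz, _names.size());
    _names.add(clazz);
    return _names.size() - 1;
  }

  // Any node: resolve an id back to its class name.
  synchronized String className(int id) { return _names.get(id); }

  public static void main(String[] args) {
    InternTableSketch t = new InternTableSketch();
    int id = t.onIce("water.fvec.Frame");
    System.out.println(id + " -> " + t.className(id)); // 0 -> water.fvec.Frame
  }
}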
private void reduce3( RPC<T> rpc ) {
  if( rpc == null ) return;
  T mrt = rpc.get();            // This is a blocking remote call
  // Note: because _fs is transient it is not set or cleared by the RPC.
  // Because the MRT object is a clone of 'self' it's likely to contain a ptr
  // to the self _fs which will be not-null and still have local pending
  // blocks. Not much can be asserted there.
  _profile.gather(mrt._profile, rpc.size_rez());
  // Unlike reduce2, results are in mrt directly not mrt._res.
  if( mrt._nhi != -1L ) {       // Any results at all?
    if( _res == null ) _res = mrt;
    else _res.reduce4(mrt);
  }
}
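// A brief plain-JDK sketch of the blocking fold in reduce3 above (the names
// and the use of CompletableFuture are assumptions, not H2O's RPC): block on
// a remote partial result, then merge it into the running local result.
import java.util.concurrent.CompletableFuture;

class BlockingFoldSketch {
  static long reduce(long res, CompletableFuture<Long> remote) {
    if (remote == null) return res;   // nothing outstanding
    long partial = remote.join();     // This is a blocking remote call
    return res + partial;             // Fold the remote partial into our result
  }

  public static void main(String[] args) {
    CompletableFuture<Long> rpc = CompletableFuture.supplyAsync(() -> 40L);
    System.out.println(reduce(2L, rpc)); // prints 42
  }
}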
static public int fetchId(String s) {
  return RPC.call(H2O.CLOUD.leader(), new FetchId(s)).get()._id;
}

@Override public void compute2() {
  _id = TypeMap.onIce(_clazz);
  tryComplete();
}
@Override public RequestBuilders.Response serve() {
  // Ask every node in the cloud, one at a time, to run a GC; block for each.
  for (H2ONode node : H2O.CLOUD._memary) {
    GCTask t = new GCTask();
    new RPC<GCTask>(node, t).call().get();
  }
  return RequestBuilders.Response.doneEmpty();
}
@Override public final void onCompletion( CountedCompleter caller ) { // Distributed completion
  assert _lo == null || _lo.isDone();
  assert _hi == null || _hi.isDone();
  // Fold up results from left & right subtrees
  if( _lo    != null ) reduce2(_lo.get());
  if( _hi    != null ) reduce2(_hi.get());
  if( _local != null ) reduce2(_local   );
  // Note: in theory (valid semantics) we could push these "over the wire"
  // and block for them as we're blocking for the top-level initial split.
  // However, that would require sending "isDone" flags over the wire also.
  // MUCH simpler to just block for them all now, and send over the empty set
  // of not-yet-blocked things.
  if( _local != null && _local._fs != null )
    _local._fs.blockForPending(); // Block on all other pending tasks, also
  _keys = null;                   // Do not return _keys over wire
  if( _top_level ) postGlobal();  // Override postGlobal() to do work after all the forks have returned
}
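// A runnable plain-JDK CountedCompleter example (illustrative names, not
// H2O's MRTask) showing the same completion shape as onCompletion above:
// split into _lo/_hi children, do local work at the leaves, and fold child
// results exactly once when both subtrees have completed.
import java.util.concurrent.CountedCompleter;

class SumTask extends CountedCompleter<Long> {
  final long[] _a; final int _from, _to;
  SumTask _lo, _hi;                 // left & right subtrees
  long _res;                        // local, then folded, result

  SumTask(CountedCompleter<?> parent, long[] a, int from, int to) {
    super(parent); _a = a; _from = from; _to = to;
  }

  @Override public void compute() {
    if (_to - _from > 1_000) {      // Big enough: split range in half
      int mid = (_from + _to) >>> 1;
      _lo = new SumTask(this, _a, _from, mid);
      _hi = new SumTask(this, _a, mid, _to);
      addToPendingCount(2);         // Expect two child completions
      _lo.fork(); _hi.fork();
    } else {
      for (int i = _from; i < _to; i++) _res += _a[i]; // Local work
    }
    tryComplete();                  // Fires onCompletion once pending count drains
  }

  @Override public void onCompletion(CountedCompleter<?> caller) {
    // Fold up results from left & right subtrees
    if (_lo != null) _res += _lo._res;
    if (_hi != null) _res += _hi._res;
  }

  @Override public Long getRawResult() { return _res; }

  public static void main(String[] args) {
    long[] a = new long[100_000];
    for (int i = 0; i < a.length; i++) a[i] = i;
    System.out.println(new SumTask(null, a, 0, a.length).invoke()); // 4999950000
  }
}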
public static Key fetch(Key key) {
  FetchProducer fp = new FetchProducer(key);
  if (key.home()) fp.compute2();                  // Key is homed here: run inline
  else fp = RPC.call(key.home_node(), fp).get();  // Else ask the key's home node
  return fp._producer;
}

private FetchProducer(Key k) { _key = k; }
/**
 * Send a message from this node to all nodes in serial (including self), and receive it back
 * @param msg_size message size in bytes
 * @param repeats number of round trips to time per node
 * @return Time in nanoseconds that it took to send and receive the message (one per node)
 */
private static double[] send_recv_all(int msg_size, int repeats) {
  byte[] payload = new byte[msg_size];
  new Random().nextBytes(payload);
  final int siz = H2O.CLOUD.size();
  double[] times = new double[siz];
  for (int i = 0; i < siz; ++i) { // loop over compute nodes
    Log.debug("NetworkTest send_recv_all starting PingPong to node " + i + "...");
    H2ONode node = H2O.CLOUD._memary[i];
    Timer t = new Timer();
    for (int l = 0; l < repeats; ++l) {
      Log.debug("NetworkTest send_recv_all starting msg_size " + msg_size + " bytes, iteration " + l + " of " + repeats + " ...");
      PingPongTask ppt = new PingPongTask(payload); // same payload for all nodes
      new RPC<PingPongTask>(node, ppt).call().get(); // blocking send
      Log.debug("NetworkTest send_recv_all completed iteration " + l + " of " + repeats);
    }
    times[i] = (double) t.nanos() / repeats;
    Log.debug("NetworkTest send_recv_all completed PingPong to node " + i);
  }
  return times;
}
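// A self-contained sketch (plain JDK; the executor stands in for a remote
// node) of the timing scheme used above: time `repeats` synchronous round
// trips and report the average nanoseconds per round trip.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class PingTimerSketch {
  static double avgNanos(ExecutorService target, byte[] payload, int repeats) throws Exception {
    long start = System.nanoTime();
    for (int l = 0; l < repeats; ++l)
      target.submit(() -> payload.length).get();    // blocking "send and receive"
    return (double) (System.nanoTime() - start) / repeats;
  }

  public static void main(String[] args) throws Exception {
    ExecutorService node = Executors.newSingleThreadExecutor();
    System.out.printf("avg round trip: %.0f ns%n", avgNanos(node, new byte[1024], 1000));
    node.shutdown();
  }
}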
public static Value get( H2ONode target, Key key, int priority ) {
  RPC<TaskGetKey> rpc, old;
  while( true ) {         // Repeat until we get a unique TGK installed per key
    // Do we have an old TaskGetKey in-progress?
    rpc = TGKS.get(key);
    if( rpc != null && rpc._dt._priority >= priority ) break;
    old = rpc;
    // Make a new TGK.
    rpc = new RPC<>(target, new TaskGetKey(key,priority), 1.0f);
    if( TGKS.putIfMatchUnlocked(key,rpc,old) == old ) {
      rpc.setTaskNum().call();  // Start the op
      break;                    // Successful install of a fresh RPC
    }
  }
  Value val = rpc.get()._val;            // Block for, then fetch out the result
  TGKS.putIfMatchUnlocked(key,null,rpc); // Clear from cache
  return val;
}
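// A plain-JDK sketch of the one-in-flight-request-per-key idea behind
// TaskGetKey.get() above (ConcurrentHashMap stands in for the TGKS cache; the
// priority-upgrade retry is omitted, and all names are illustrative). Racing
// callers either install a fresh request or piggyback on the one already in
// flight, then clear the cache slot once the value arrives.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class KeyFetchCacheSketch {
  private final ConcurrentHashMap<String, CompletableFuture<String>> inflight =
      new ConcurrentHashMap<>();

  String get(String key) {
    CompletableFuture<String> fresh = new CompletableFuture<>();
    CompletableFuture<String> prior = inflight.putIfAbsent(key, fresh);
    CompletableFuture<String> req = (prior != null) ? prior : fresh;
    if (prior == null)                          // We won the install race:
      CompletableFuture.runAsync(() -> fresh.complete("value-of-" + key)); // start the (simulated) fetch
    String val = req.join();                    // Block for, then fetch out the result
    inflight.remove(key, req);                  // Clear from cache
    return val;
  }

  public static void main(String[] args) {
    System.out.println(new KeyFetchCacheSketch().get("K1")); // value-of-K1
  }
}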
@Override protected Response serve() {
  if ((node_idx.value() < 0) || (node_idx.value() >= H2O.CLOUD.size())) {
    throw new IllegalArgumentException("Illegal node_idx for this H2O cluster (must be from 0 to " + (H2O.CLOUD.size() - 1) + ")");
  }
  H2ONode node = H2O.CLOUD._memary[node_idx.value()];
  GetTicksTask ppt = new GetTicksTask();
  Log.trace("GetTicksTask starting to node " + node_idx.value() + "...");
  // Synchronous RPC call to get ticks from remote (possibly this) node.
  new RPC<GetTicksTask>(node, ppt).call().get();
  Log.trace("GetTicksTask completed to node " + node_idx.value());
  long[][] cpuTicks = ppt._cpuTicks;
  // Stuff tick information into json response.
  JsonArray j = new JsonArray();
  for (long[] arr : cpuTicks) {
    JsonArray j2 = new JsonArray();
    j2.add(new JsonPrimitive(arr[0]));
    j2.add(new JsonPrimitive(arr[1]));
    j2.add(new JsonPrimitive(arr[2]));
    j2.add(new JsonPrimitive(arr[3]));
    j.add(j2);
  }
  JsonObject o = new JsonObject();
  o.add("cpuTicks", j);
  return Response.done(o);
}
static boolean hasGPU(H2ONode node, int gpu_id) {
  final boolean hasGPU;
  if (H2O.SELF.equals(node)) {
    hasGPU = hasGPU(gpu_id);               // Local check, no RPC needed
  } else {
    HasGPUTask t = new HasGPUTask(gpu_id);
    new RPC<>(node, t).call().get();       // Blocking call; the response is unpacked back into 't'
    hasGPU = t._hasGPU;
  }
  Log.debug("Availability of GPU (id=" + gpu_id + ") on node " + node + ": " + hasGPU);
  return hasGPU;
}
public byte[] getBoosterBytes() {
  final H2ONode boosterNode = getBoosterNode();
  final byte[] boosterBytes;
  if (H2O.SELF.equals(boosterNode)) {
    boosterBytes = XGBoost.getRawArray(XGBoostUpdater.getUpdater(_modelKey).getBooster());
  } else {
    Log.debug("Booster will be retrieved from a remote node, node=" + boosterNode);
    FetchBoosterTask t = new FetchBoosterTask(_modelKey);
    boosterBytes = new RPC<>(boosterNode, t).call().get()._boosterBytes;
  }
  return boosterBytes;
}
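// A small closing sketch of the run-locally-or-RPC dispatch shape shared by
// fetch(), hasGPU() and getBoosterBytes() above (plain JDK; the "remote" node
// is simulated with an executor, and all names are assumptions).
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class LocalOrRemoteSketch {
  static final String SELF = "self";

  static boolean probe(String node, ExecutorService remote) throws Exception {
    if (SELF.equals(node))
      return probeLocally();                    // cheap: no serialization, no network hop
    return remote.submit(LocalOrRemoteSketch::probeLocally).get(); // blocking remote call
  }

  static boolean probeLocally() { return true; } // stand-in for a real capability check

  public static void main(String[] args) throws Exception {
    ExecutorService remote = Executors.newSingleThreadExecutor();
    System.out.println(probe("other-node", remote)); // true, fetched "remotely"
    remote.shutdown();
  }
}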