@Override public byte[] getBytes( Object val ) {
  // The payload is carried in a Value; return its raw (possibly lazily-loaded) bytes.
  final Value value = (Value) val;
  return value.memOrLoad();
}
/** Return the bits for a particular tree */
public byte[] tree(int tree_id) {
  // Lazily-grown cache of serialized tree bytes.  Read the field once into a
  // local so a concurrent grow/replace cannot fault this call mid-way; the
  // "trees = ts = ..." form publishes the new array before it is indexed.
  byte[][] ts = trees;
  if( ts == null ) trees = ts = new byte[tree_id+1][];
  if( tree_id >= ts.length ) trees = ts = Arrays.copyOf(ts,tree_id+1);
  // Cache miss: pull the tree's bytes from the distributed K/V store.
  // NOTE(review): not synchronized — racing callers may each fetch and each
  // grow the array; last write wins. Benign only if DKV bits are immutable — confirm.
  if( ts[tree_id] == null ) ts[tree_id] = DKV.get(t_keys[tree_id]).memOrLoad();
  return ts[tree_id];
}
/**
 * Append up to {@code len} bytes of this value as an HTML-escaped preview.
 * Stops early after ~6 newlines (rendered as {@code <br>}) and appends "..."
 * when the value was truncated.
 * @param len max number of bytes to render
 * @param sb  builder to append into
 * @return the same builder, for chaining
 */
public StringBuilder getString( int len, StringBuilder sb ) {
  int newlines = 0;
  byte[] b = memOrLoad();
  final int LEN = Math.min(len, b.length);
  for( int i=0; i<LEN; i++ ) {
    byte c = b[i];
    // FIX: emit real HTML entities — the old code appended the raw character
    // back, making these escape branches no-ops.
    if( c == '&' ) sb.append("&amp;");
    else if( c == '<' ) sb.append("&lt;");
    else if( c == '>' ) sb.append("&gt;");
    else if( c == '\n' ) {
      sb.append("<br>");
      if( newlines++ > 5 ) break;  // cap the preview at a handful of lines
    }
    // Insert a space after a bare comma so the browser can wrap long lines.
    else if( c == ',' && i+1<LEN && b[i+1]!=' ' ) sb.append(", ");
    else sb.append((char)c);
  }
  if( b.length > LEN ) sb.append("...");  // signal truncation
  return sb;
}
public void map( Key key ) {
  // The first payload byte names which bit to raise in the local bitset.
  byte[] bits = DKV.get(key).memOrLoad();
  _x = 1 << bits[0];
}
public void reduce( DRemoteTask rbs ) {
  // Merge a peer's bitset into ours.
  RemoteBitSet peer = (RemoteBitSet) rbs;
  _x |= peer._x;
}
@Override public void store(Value v) { // Should be used only if ice goes to HDFS assert this == getIce(); assert !v.isPersisted(); byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files store(new Path(_iceRoot, getIceName(v)), m); v.setdsk(); // Set as write-complete to disk }
public AutoBuffer write(AutoBuffer bb) { byte p = _persist; if( onICE() ) p &= ~ON_dsk; // Not on the remote disk return bb.put1(p).put2(_type).putA1(memOrLoad()); }
/** Set persistence to HDFS from ICE */
public void setHdfs() {
  assert onICE();
  byte[] mem = memOrLoad(); // Get into stable memory
  // Flip the persistence flags to HDFS/not-yet-on-disk, then write out.
  _persist = Value.HDFS|Value.NOTdsk;
  Persist.I[Value.HDFS].store(this);
  removeIce();  // Remove from ICE disk
  assert onHDFS(); // Flip to HDFS
  // Statement order matters here: re-publish the bytes last.
  _mem = mem;   // Close a race with the H2O cleaner zapping _mem while removing from ice
}
@Override public void store(Value v) { // Only the home node does persistence on NFS if( !v._key.home() ) return; // A perhaps useless cutout: the upper layers should test this first. if( v.isPersisted() ) return; try { File f = getFileForKey(v._key); f.mkdirs(); FileOutputStream s = new FileOutputStream(f); try { byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files if( m != null ) new AutoBuffer(s.getChannel(), false, Value.NFS).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { s.close(); } } catch( IOException e ) { H2O.ignore(e); } }
/**
 * Deserialize (or return the cached) POJO backing this Value.
 * Caches the result in _pojo so repeated calls avoid re-deserializing.
 */
public <T extends Iced> T get() {
  touch();
  Iced pojo = (Iced)_pojo; // Read once!
  if( pojo != null ) return (T)pojo;   // cache hit
  // Cache miss: build a fresh instance of the recorded type and fill it
  // from the serialized bytes.
  pojo = TypeMap.newInstance(_type);
  pojo.read(new AutoBuffer(memOrLoad()));
  pojo.init(_key);
  return (T)(_pojo = pojo);   // publish into the cache and return
}
public <T extends Freezable> T get(Class<T> fc) {
public <T extends Freezable> T getFreezable() { touch(); Freezable pojo = _pojo; // Read once! if( pojo != null ) return (T)pojo; pojo = TypeMap.newFreezable(_type); pojo.read(new AutoBuffer(memOrLoad())); if( pojo instanceof Iced ) ((Iced)pojo).init(_key); return (T)(_pojo = pojo); }
@Override public void store(Value v) { assert !v.isPersisted(); new File(_dir, getIceDirectory(v._key)).mkdirs(); // Nuke any prior file. FileOutputStream s = null; try { s = new FileOutputStream(getFile(v)); } catch( FileNotFoundException e ) { String info = "Key: " + v._key.toString() + "\nEncoded: " + getFile(v); throw new RuntimeException(Log.err("Encoding a key to a file failed!\n" + info, e)); } try { byte[] m = v.memOrLoad(); // we are not single threaded anymore assert m != null && m.length == v._max : "Trying to save partial file: value key=" + v._key + ", length to save=" + m + ", value max size=" + v._max; // Assert not saving partial files new AutoBuffer(s.getChannel(), false, Value.ICE).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { Utils.close(s); } }
/** Creates a Stream for reading bytes */
public InputStream openStream(ProgressMonitor p) throws IOException {
  // Dispatch to the persistence backend currently holding the bytes.
  if( onNFS()     ) return PersistNFS    .openStream(_key);
  if( onHDFS()    ) return PersistHdfs   .openStream(_key, p);
  if( onS3()      ) return PersistS3     .openStream(_key, p);
  if( onTachyon() ) return PersistTachyon.openStream(_key, p);
  if( isFrame() )
    throw new IllegalArgumentException("Tried to pass a Frame to openStream (maybe tried to parse a (already-parsed) Frame?)");
  assert _type==TypeMap.PRIM_B : "Expected byte[] type but got "+TypeMap.className(_type);
  // In-memory byte[] value: wrap the (possibly lazily-loaded) bytes directly.
  byte[] bits = memOrLoad();
  return new ByteArrayInputStream(bits);
}
/** Internal computation of depth and number of leaves. */
public void find_leaves_depth() {
  // if( _tl != null ) return;
  _td = new Counter();
  _tl = new Counter();
  for( Key tkey : t_keys ) {
    // depth_leaves packs two ints into one long: depth in the high 32 bits,
    // leaf count in the low 32 bits — unpacked by the two add() calls below.
    long dl = Tree.depth_leaves(new AutoBuffer(DKV.get(tkey).memOrLoad()), regression);
    _td.add((int) (dl >> 32));
    _tl.add((int) dl);
  }
}
/** Leaf-count statistics, computed on demand. */
public Counter leaves() { find_leaves_depth(); return _tl; }
private void doLarge(Key k) {
  // The key must not exist yet.
  Assert.assertNull(DKV.get(k));
  Random rng = new Random(1234567890123456789L);
  int total = 0;
  // Keep appending random keys until the value grows past 8 UDP MTUs.
  while( total < AutoBuffer.MTU*8 ) {
    byte[] raw = new byte[Key.KEY_LENGTH];
    rng.nextBytes(raw);
    Append.append(k, Key.make(raw));
    byte[] bits = DKV.get(k).memOrLoad();
    // The freshly appended key bytes must form the tail of the value.
    Assert.assertArrayEquals(raw, Arrays.copyOfRange(bits, bits.length-raw.length, bits.length));
    total = bits.length;
  }
  DKV.remove(k);
}
public byte[] getFirstBytes() { Value v = this; if(isByteVec()){ ByteVec vec = get(); return vec.chunkForChunkIdx(0).getBytes(); } else if(isFrame()){ Frame fr = get(); return ((ByteVec)fr.vecs()[0]).chunkForChunkIdx(0).getBytes(); } // Return empty array if key has been deleted return v != null ? v.memOrLoad() : new byte[0]; }
@Override public Value atomic( Value val ) {
  // Bump both packed 8-byte longs by 2 and publish a fresh 16-byte value.
  final byte[] old = val.memOrLoad();
  final byte[] updated = new byte[16];
  UDP.set8(updated, 0, UDP.get8(old, 0) + 2);
  UDP.set8(updated, 8, UDP.get8(old, 8) + 2);
  return new Value(_key, updated);
} }
private void doBasic(Key k) {
  // The key must not exist yet.
  Assert.assertNull(DKV.get(k));
  // Append one key; reading back must yield exactly that key.
  Key a1 = Key.make("tatomic 1");
  Append.append(k, a1);
  Key[] stored = new AutoBuffer(DKV.get(k).memOrLoad()).getA(Key.class);
  Assert.assertEquals(1, stored.length);
  Assert.assertEquals(a1, stored[0]);
  // Append a second key; both must now be present, in append order.
  Key a2 = Key.make("tatomic 2");
  Append.append(k, a2);
  stored = new AutoBuffer(DKV.get(k).memOrLoad()).getA(Key.class);
  Assert.assertEquals(2, stored.length);
  Assert.assertEquals(a1, stored[0]);
  Assert.assertEquals(a2, stored[1]);
  DKV.remove(k);
}
@Test public void testRemoteAtomic() {
  // Make an execution key homed to the remote node
  H2O cloud = H2O.CLOUD;
  H2ONode target = cloud._memary[0];
  if( target == H2O.SELF ) target = cloud._memary[1];  // never home it to ourselves
  Key key = Key.make("test6_remote",(byte)1,Key.DFJ_INTERNAL_USER,target);
  // It's a plain empty byte array - but too big for atomic update on purpose
  Value v1 = new Value(key,16);
  // Remote-put operation
  DKV.put(key,v1);
  DKV.write_barrier();  // make the put globally visible before the atomic runs
  // Atomically run this function on a clone of the bits from the existing
  // Key and install the result as the new Value.  This function may run
  // multiple times if there are collisions.
  Atomic q = new Atomic2();
  q.invoke(key);  // Run remotely; block till done
  Value val3 = DKV.get(key);
  assertNotSame(v1,val3);  // the atomic must have installed a fresh Value
  // Both packed longs started at 0 and were bumped by 2 by Atomic2.
  AutoBuffer ab = new AutoBuffer(val3.memOrLoad());
  assertEquals(2,ab.get8(0));
  assertEquals(2,ab.get8(8));
  DKV.remove(key);  // Cleanup after test
}
@Override public void store(Value v) {
  // Should be used only if ice goes to HDFS
  assert this == H2O.getPM().getIce();
  assert !v.isPersisted();
  byte[] m = v.memOrLoad();
  assert (m == null || m.length == v._max); // Assert not saving partial files
  store(new Path(_iceRoot, getIceName(v)), m);
  // NOTE(review): no v.setdsk() here, unlike the other store paths in this
  // codebase — confirm store(Path,byte[]) marks the value write-complete.
}
/**
 * Persist a Value's bytes to Google Cloud Storage. Only the key's home
 * node performs the write.
 * @throws IOException if the upload fails
 */
@Override public void store(Value v) throws IOException {
  if (!v._key.home()) return;
  final byte[] payload = v.memOrLoad();
  final GcsBlob blob = GcsBlob.of(v._key);
  Log.debug("Storing: " + blob.toString());
  final ByteBuffer buffer = ByteBuffer.wrap(payload);
  // FIX: close the write channel (try-with-resources) — the old code leaked
  // it, so the upload was never flushed/committed.  Also loop on write():
  // a single call is not guaranteed to drain the whole buffer.
  try (java.nio.channels.WritableByteChannel chan =
           storageProvider.getStorage().create(blob.getBlobInfo()).writer()) {
    while (buffer.hasRemaining()) {
      chan.write(buffer);
    }
  }
}