@Override public void save(Model m, Path f) throws IOException { assert m!=null : "Model cannot be null!"; AutoBuffer ab = null; // Save given mode to autobuffer m.getModelSerializer().save(m, saveHeader( m,ab=ab4write() ) ); // Spill it into disk _hfs.mkdirs(f.getParent()); FSDataOutputStream os = _hfs.create(f, _force); try { os.write(ab.buf()); } finally { os.close(); } }
/**
 * Converts the binary form of a singlenoderf.Tree into the compressed DTree byte form.
 * @param tree binary form of a singlenoderf.Tree
 * @param regression true if the tree encodes a regression (vs classification) model
 * @return all bytes of the compressed DTree representation
 */
public static byte[] toDTreeCompressedTreeAB(byte[] tree, boolean regression) {
  AutoBuffer ab = new AutoBuffer(tree);
  // NOTE: removed a dead local — the original allocated an unused 'result' AutoBuffer here.
  return toDTreeCompressedTree(ab, regression).buf();
}
/** Constructs a Value wrapping an in-memory POJO: the POJO is eagerly
 *  serialized into {@code _mem}, persistence defaults to ICE, and a replica
 *  set is allocated only on the key's home node. */
public Value(Key k, Freezable pojo) {
  _key = k;
  _pojo = pojo;
  _type = (short)pojo.frozenType();
  // Eagerly serialize the POJO so _mem always holds its binary form
  _mem = pojo.write(new AutoBuffer()).buf();
  _max = _mem.length;
  _persist = ICE;
  _rwlock = new AtomicInteger(0);
  // Replica tracking only exists on the home node for this key
  _replicas = k.home() ? new NonBlockingSetInt() : null;
} // Nullary constructor for weaving
/** Serializes this object to JSON, stripping the framework-internal
 *  "Request2" and "response_info" entries.
 *  @return parsed JsonObject, or an empty one if serialization produced no bytes */
public JsonObject toJSON() {
  // Decode explicitly as UTF-8: the bare String(byte[]) ctor uses the
  // platform default charset and can corrupt non-ASCII JSON.
  final String json = new String(writeJSON(new AutoBuffer()).buf(),
                                 java.nio.charset.StandardCharsets.UTF_8);
  if (json.isEmpty()) return new JsonObject();
  JsonObject jo = (JsonObject)new JsonParser().parse(json);
  jo.remove("Request2");
  jo.remove("response_info");
  return jo;
}
/** Returns this Value's raw bytes, materializing them if necessary.
 *  Resolution order: cached {@code _mem}; else serialize the live POJO
 *  (Chunks expose their bytes directly); else an empty array when the
 *  Value is known to be zero-length; else load from the persistence layer.
 *  Fields are read once into locals because they may be concurrently
 *  cleared/updated by other threads. */
public final byte[] memOrLoad() {
  byte[] mem = _mem;      // Read once!
  if( mem != null ) return mem;
  Freezable pojo = _pojo; // Read once!
  if( pojo != null )      // Has the POJO, make raw bytes from it
    if( pojo instanceof Chunk ) return (_mem = ((Chunk)pojo).getBytes());
    else return (_mem = pojo.write(new AutoBuffer()).buf());
  if( _max == 0 ) return (_mem = new byte[0]); // Zero-length Value: no load needed
  return (_mem = loadPersist()); // Fall back to the persistence backend
} // Just an empty shell of a Value, no local data but the Value is "real".
private Response wrap( String http_code, Schema S, RequestStatics.RequestType type ) { // Convert Schema to desired output flavor switch( type ) { case json: return new Response(http_code, MIME_JSON, new String(S.writeJSON(new AutoBuffer()).buf())); /* case xml: //return new Response(http_code, MIME_XML , new String(S.writeXML (new AutoBuffer()).buf())); case java: throw H2O.unimpl(); case html: { RString html = new RString(_htmlTemplate); html.replace("CONTENTS", S.writeHTML(new water.util.DocGen.HTML()).toString()); return new Response(http_code, MIME_HTML, html.toString()); } */ default: throw H2O.fail(); } }
/** Serializes this object to JSON and tags the result with this instance's id.
 *  @return parsed JsonObject, or an empty one if serialization produced no bytes */
public JsonObject toJSON() {
  // Decode explicitly as UTF-8: the bare String(byte[]) ctor uses the
  // platform default charset and can corrupt non-ASCII JSON.
  final String json = new String(writeJSON(new AutoBuffer()).buf(),
                                 java.nio.charset.StandardCharsets.UTF_8);
  if (json.isEmpty()) return new JsonObject();
  JsonObject jo = (JsonObject)new JsonParser().parse(json);
  jo.addProperty("id", this.getId());
  return jo;
}
}
/** Constructs a Value wrapping an in-memory Iced POJO with explicit
 *  persistence bits. Chunks expose their raw bytes directly; everything
 *  else is eagerly serialized into {@code _mem}. */
public Value(Key k, Iced pojo, byte be ) {
  _key = k;
  _pojo = pojo;
  _type = (short)pojo.frozenType();
  // Chunks already hold their bytes; other POJOs are serialized here
  _mem = (pojo instanceof Chunk)?((Chunk)pojo).getBytes():pojo.write(new AutoBuffer()).buf();
  _max = _mem.length;
  // For the ICE backend, assume new values are not-yet-written.
  // For HDFS & NFS backends, assume the data came from global data and
  // preserve the passed-in persist bits.
  byte p = (byte)(be&BACKEND_MASK);
  _persist = (p==ICE) ? p : be;
  _rwlock = new AtomicInteger(0);
  // Replica tracking only exists on the home node for this key
  _replicas = k.home() ? new NonBlockingSetInt() : null;
}
public Value(Key k, Freezable pojo) {
/** Serializes this object to JSON, adding the model's category under
 *  the nested "model" object when present.
 *  @return parsed JsonObject, or an empty one if serialization produced no bytes */
public JsonObject toJSON() {
  // Decode explicitly as UTF-8: the bare String(byte[]) ctor uses the
  // platform default charset and can corrupt non-ASCII JSON.
  final String json = new String(writeJSON(new AutoBuffer()).buf(),
                                 java.nio.charset.StandardCharsets.UTF_8);
  if (json.isEmpty()) return new JsonObject();
  JsonObject jo = (JsonObject)new JsonParser().parse(json);
  if (jo.has("model"))
    jo.getAsJsonObject("model").addProperty("model_category", this.model_category.toString());
  return jo;
}
}
/** Asserts that serializing {@code test} to JSON yields exactly {@code expJson}. */
private void assertEqual(Iced test, String expJson) {
  AutoBuffer ab = new AutoBuffer();
  // Decode explicitly as UTF-8 so the comparison is not platform-charset dependent.
  String json = new String(test.writeJSON(ab).buf(), java.nio.charset.StandardCharsets.UTF_8);
  Assert.assertEquals(expJson, json);
}
}
/** Renders the JSON-response HTML box for a completed request.
 *  @param response the request's response object
 *  @return the filled template for a {@code done} response, or the empty
 *          string for error/redirect/poll (and any other) states */
protected String buildJSONResponseBox(Response response) {
  switch (response._status) {
  case done :
    RString result = new RString(_jsonResponseBox);
    // If no response object was produced, fall back to the request's own JSON.
    // Decode explicitly as UTF-8: the bare String(byte[]) ctor is platform-dependent.
    result.replace("JSON_RESPONSE_BOX",
        response._response == null
          ? new String(response._req.writeJSON(new AutoBuffer()).buf(), java.nio.charset.StandardCharsets.UTF_8)
          : GSON_BUILDER.toJson(response.toJson()));
    return result.toString();
  case error :
  case redirect:
  case poll :
  default :
    return "";
  }
}
/** Builds a system key embedding up to three desired replica nodes ahead of
 *  the caller's key bytes.
 *  @param kb original key bytes, appended verbatim after the header
 *  @param rf replication factor passed through to the inner make()
 *  @param systemType system-key type tag, must be &lt; 32
 *  @param replicas zero to three nodes whose addresses are encoded in the key
 *  @return the constructed Key */
static public Key make(byte[] kb, byte rf, byte systemType, H2ONode... replicas) {
  // no more than 3 replicas allowed to be stored in the key
  assert 0 <=replicas.length && replicas.length<=3;
  assert systemType<32; // only system keys allowed
  // Key byte layout is:
  // 0 - systemType, from 0-31
  // 1 - replica-count, plus up to 3 bits for ip4 vs ip6
  // 2-n - zero, one, two or 3 IP4 (4+2 bytes) or IP6 (16+2 bytes) addresses
  // 2-5- 4 bytes of chunk#, or -1 for masters
  // n+ - repeat of the original kb
  AutoBuffer ab = new AutoBuffer();
  ab.put1(systemType).put1(replicas.length);
  for( H2ONode h2o : replicas ) h2o.write(ab); // each node encodes its own address
  ab.put4(-1); // master marker in the chunk# slot
  ab.putA1(kb,kb.length);
  // Trim the buffer to the bytes actually written before wrapping as a Key
  return make(Arrays.copyOf(ab.buf(),ab.position()),rf);
}
/** Serializes this tree (data id, seed, producer id, then the tree bytes)
 *  into a freshly-made internal Key/Value pair stored in the DKV.
 *  @return the Key under which the serialized tree was stored */
public Key toKey() {
  AutoBuffer bs = new AutoBuffer();
  bs.put4(_data_id);
  bs.put8(_seed);
  bs.put1(_producerId);
  _tree.write(bs); // append the tree's own serialized form
  Key key = Key.make((byte)1,Key.DFJ_INTERNAL_USER, H2O.SELF);
  DKV.put(key,new Value(key, bs.buf()));
  return key;
}
public TreeModel.CompressedTree compress() { // Log.info(Sys.RANDF, _tree.toString(new StringBuilder(), Integer.MAX_VALUE).toString()); int size = _tree.dtreeSize(); if (_tree instanceof LeafNode) { size += 3; } AutoBuffer ab = new AutoBuffer(size); if( _tree instanceof LeafNode) ab.put1(0).put2((char)65535); _tree.compress(ab); assert ab.position() == size: "Actual size doesn't agree calculated size."; char _nclass = (char)_data.classes(); return new TreeModel.CompressedTree(ab.buf(),_nclass,_seed); }
/** Appends one group of K per-class trees to the model: compresses each
 *  tree, stores the compressed form plus an auxiliary node-info tree in the
 *  DKV, updates tree-shape stats, and extends the per-iteration score and
 *  timing arrays. Blocks until all DKV puts complete. */
public void addKTrees( DTree[] trees) {
  // DEBUG: Print the generated K trees
  //SharedTree.printGenerateTrees(trees);
  assert nclasses()==trees.length;
  // Compress trees and record tree-keys
  _treeKeys = Arrays.copyOf(_treeKeys ,_ntrees+1);
  _treeKeysAux = Arrays.copyOf(_treeKeysAux ,_ntrees+1);
  Key[] keys = _treeKeys[_ntrees] = new Key[trees.length];
  Key[] keysAux = _treeKeysAux[_ntrees] = new Key[trees.length];
  Futures fs = new Futures(); // batch all DKV puts; wait once at the end
  for( int i=0; i<nclasses(); i++ )
    if( trees[i] != null ) {
      CompressedTree ct = trees[i].compress(_ntrees,i,_domains);
      DKV.put(keys[i]=ct._key,ct,fs);
      _treeStats.updateBy(trees[i]); // Update tree shape stats
      // Auxiliary node-info tree, stored under a key derived from the main tree's key
      CompressedTree ctAux = new CompressedTree(trees[i]._abAux.buf(),-1,-1,-1);
      keysAux[i] = ctAux._key = Key.make(createAuxKey(ct._key.toString()));
      DKV.put(ctAux,fs);
    }
  _ntrees++;
  // 1-based for errors; _scored_train[0] is for zero trees, not 1 tree
  _scored_train = ArrayUtils.copyAndFillOf(_scored_train, _ntrees+1, new ScoreKeeper());
  _scored_valid = _scored_valid != null ? ArrayUtils.copyAndFillOf(_scored_valid, _ntrees+1, new ScoreKeeper()) : null;
  _training_time_ms = ArrayUtils.copyAndFillOf(_training_time_ms, _ntrees+1, System.currentTimeMillis());
  fs.blockForPending();
}
public TreeModel.CompressedTree compress() { int sz = root().size(); if( root() instanceof LeafNode ) sz += 3; // Oops - tree-stump AutoBuffer ab = new AutoBuffer(sz); if( root() instanceof LeafNode ) // Oops - tree-stump ab.put1(0).put2((char)65535); // Flag it special so the decompress doesn't look for top-level decision root().compress(ab); // Compress whole tree assert ab.position() == sz; return new TreeModel.CompressedTree(ab.buf(),_nclass,_seed); } /** Save this tree into DKV store under default random Key. */
/** Compresses this tree into its binary {@code CompressedTree} form, while
 *  also (as a side effect) rebuilding {@code _abAux}, the auxiliary node-info
 *  buffer that the caller persists separately.
 *  @param tid tree index within the model
 *  @param cls class index this tree predicts
 *  @param domains categorical domains (unused here; passed for interface symmetry — TODO confirm)
 *  @return the compressed tree carrying seed, tree id and class */
public CompressedTree compress(int tid, int cls, String[][] domains) {
  int sz = root().size();
  if( root() instanceof LeafNode ) sz += 3; // Oops - tree-stump
  AutoBuffer ab = new AutoBuffer(sz);
  _abAux = new AutoBuffer(); // side effect: fresh auxiliary buffer, read by caller after this returns
  if( root() instanceof LeafNode ) // Oops - tree-stump
    ab.put1(0).put2((char)65535); // Flag it special so the decompress doesn't look for top-level decision
  root().compress(ab, _abAux); // Compress whole tree
  assert ab.position() == sz;
  return new CompressedTree(ab.buf(), _seed,tid,cls);
}