// Free the cached in-memory byte[] form (NOT the POJO — the trailing comment was wrong).
// The assert guarantees the bytes remain rebuildable: from disk, from the POJO,
// or (for DVEC chunks) from the backing vector.
public final void freeMem() { assert isPersisted() || _pojo != null || _key._kb[0]==Key.DVEC; _mem = null; }
/** Drop the POJO form of this Value.  Safe only while the raw bytes
 *  survive somewhere else — on disk or in the in-memory byte[] cache —
 *  which is exactly what the assert checks. */
public final void freePOJO() {
  assert isPersisted() || _mem != null;
  _pojo = null;
}
// True when this Value has a positive payload size (_max > 0) yet the bytes exist
// nowhere: no in-memory cache, no POJO, and no persisted copy on disk.
public final boolean isEmpty() { return _max > 0 && _mem==null && _pojo == null && !isPersisted(); } public final byte[] getBytes() {
@Override public void delete(Value v) { assert !v.isPersisted(); // Upper layers already cleared out File f = getFile(v); f.delete(); }
/** Store complete Values to disk.  A no-op when this Value already has a
 *  persisted copy; otherwise delegates to the Persist backend for this key. */
void storePersist() throws IOException {
  if( isPersisted() ) return;          // Already on disk; nothing to do
  Persist.I[backend()].store(this);
}
// Remote-put acknowledgement: optionally drop the local cache, then complete the put.
// The original flattened line hid both `if` statements behind the comment;
// restored here so the cache eviction and completion actually run.
@Override public void onAck() {
  // remove local cache but NOT in case it is already on disk
  // (ie memory can be reclaimed and we assume we have plenty of disk space)
  if( _dontCache && !_xval.isPersisted() ) H2O.putIfMatch(_xkey, null, _xval);
  if( _xval != null ) _xval.completeRemotePut();
}
@Override public byte priority() {
/** Load some or all of completely persisted Values.  Requires that a
 *  persisted copy exists; reads it back via this key's Persist backend. */
byte[] loadPersist() {
  assert isPersisted();                // Caller must have spilled this Value first
  return Persist.I[backend()].load(this);
}
@Override public void delete(final Value v) { assert this == getIce(); assert !v.isPersisted(); // Upper layers already cleared out run(new Callable() { @Override public Object call() throws Exception { Path p = new Path(_iceRoot, getIceName(v)); FileSystem fs = FileSystem.get(p.toUri(), CONF); fs.delete(p, true); return null; } }, false, 0); }
@Override public Object call() throws Exception { FileSystem fs = FileSystem.get(p.toUri(), CONF); FSDataInputStream s = null; try { s = fs.open(p); // NOTE: // The following line degrades performance of HDFS load from S3 API: s.readFully(skip,b,0,b.length); // Google API's simple seek has better performance // Load of 300MB file via Google API ~ 14sec, via s.readFully ~ 5min (under the same condition) ByteStreams.skipFully(s, skip_); ByteStreams.readFully(s, b); assert v.isPersisted(); } finally { Utils.close(s); } return null; } }, true, v._max);
@Override public byte[] load(Value v) { File f = getFile(v); if( f.length() < v._max ) { // Should be fully on disk... // or it's a racey delete of a spilled value assert !v.isPersisted() : f.length() + " " + v._max + " " + v._key; return null; // No value } try { FileInputStream s = new FileInputStream(f); try { AutoBuffer ab = new AutoBuffer(s.getChannel(), true, Value.ICE); byte[] b = ab.getA1(v._max); ab.close(); return b; } finally { s.close(); } } catch( IOException e ) { // Broken disk / short-file??? throw new RuntimeException(Log.err("File load failed: ", e)); } }
@Override public void store(Value v) { // Should be used only if ice goes to HDFS assert this == getIce(); assert !v.isPersisted(); byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files store(new Path(_iceRoot, getIceName(v)), m); v.setdsk(); // Set as write-complete to disk }
/** Remove dead Values from disk */ void removeIce() { // do not yank memory, as we could have a racing get hold on to this // free_mem(); if( !isPersisted() || !onICE() ) return; // Never hit disk? clrdsk(); // Not persisted now Persist.I[backend()].delete(this); } /** Load some or all of completely persisted Values */
@Override public byte[] load(Value v) { long skip = 0; Key k = v._key; // Convert a chunk into a long-offset from the base file. if( k._kb[0] == Key.DVEC ) skip = water.fvec.NFSFileVec.chunkOffset(k); // The offset try { FileInputStream s = null; try { s = new FileInputStream(getFileForKey(k)); FileChannel fc = s.getChannel(); fc.position(skip); AutoBuffer ab = new AutoBuffer(fc, true, Value.NFS); byte[] b = ab.getA1(v._max); ab.close(); assert v.isPersisted(); return b; } finally { if( s != null ) s.close(); } } catch( IOException e ) { // Broken disk / short-file??? H2O.ignore(e); return null; } }
@Override public void store(Value v) { // Only the home node does persistence on NFS if( !v._key.home() ) return; // A perhaps useless cutout: the upper layers should test this first. if( v.isPersisted() ) return; try { File f = getFileForKey(v._key); f.mkdirs(); FileOutputStream s = new FileOutputStream(f); try { byte[] m = v.memOrLoad(); assert (m == null || m.length == v._max); // Assert not saving partial files if( m != null ) new AutoBuffer(s.getChannel(), false, Value.NFS).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { s.close(); } } catch( IOException e ) { H2O.ignore(e); } }
@Override public void store(Value v) { assert !v.isPersisted(); new File(_dir, getIceDirectory(v._key)).mkdirs(); // Nuke any prior file. FileOutputStream s = null; try { s = new FileOutputStream(getFile(v)); } catch( FileNotFoundException e ) { String info = "Key: " + v._key.toString() + "\nEncoded: " + getFile(v); throw new RuntimeException(Log.err("Encoding a key to a file failed!\n" + info, e)); } try { byte[] m = v.memOrLoad(); // we are not single threaded anymore assert m != null && m.length == v._max : "Trying to save partial file: value key=" + v._key + ", length to save=" + m + ", value max size=" + v._max; // Assert not saving partial files new AutoBuffer(s.getChannel(), false, Value.ICE).putA1(m, m.length).close(); v.setdsk(); // Set as write-complete to disk } finally { Utils.close(s); } }
@Override public Value atomic(Value val) { T old = val == null ? null : (T)(val.get().clone()); T nnn = atomic(old); // Atomic operation changes the data, so it can not be performed over values persisted on read-only data source // as we would not be able to write those changes back. assert val == null || val.onICE() || !val.isPersisted(); return nnn == null ? null : new Value(_key,nnn,val==null?Value.ICE:(byte)(val._persist&Value.BACKEND_MASK)); } @Override public void onSuccess( Value old ) { onSuccess(old==null?null:(T)old.get()); }
s = getObjectForKey(k, skip, v._max).getObjectContent(); assert v.isPersisted(); TimeLine.record_IOclose(start_ns, start_io_ms, 1/* read */, v._max, Value.S3); return b;
if( len == 0 || val.rawMem() != null || val.rawPOJO() != null || val.isPersisted() ) return val; assert !key.home(); // Master must have *something*; we got nothing & need to fetch
if( val.isPersisted() && m != null && p != null && !isChunk ) { if( !val.isPersisted() && !diskFull && (force || (lazyPersist() && lazy_clean(key)))) { try { if( force && val.isPersisted() ) { val.freeMem (); if( m != null ) freed += val._max; m = null; val.freePOJO(); if( p != null ) freed += val._max; p = null;
if(!_visited.add(cidx)) { Value v = H2O.get(in._vec.chunkKey(cidx)); if(v != null && v.isPersisted()) { v.freePOJO(); v.freeMem(); if(v != null && v.isPersisted()) { v.freePOJO(); v.freeMem();