private void fail(InsertException e, ClientContext context) {
    // Cancel all, then call the callback
    synchronized(this) {
        if(finished) return;
        finished = true;
    }
    cb.onFailure(e, this, context);
}
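// All of these call sites share one idempotency contract: a terminal flag
// (finished/cancelled) is claimed under the monitor, and cb.onFailure() then
// runs exactly once, outside the lock. A minimal self-contained sketch of the
// pattern follows; Failable and Callback are hypothetical stand-ins for the
// Freenet types, not real classes from this codebase.
final class Failable {
    interface Callback {
        void onFailure(Exception e, Failable source);
    }
    private final Callback cb;
    private boolean finished;
    Failable(Callback cb) {
        this.cb = cb;
    }
    void fail(Exception e) {
        synchronized(this) {
            if(finished) return; // A racing fail()/cancel() already won.
            finished = true;     // Claim the sole right to run the callback.
        }
        // Deliver outside the monitor so the callback can take its own locks
        // without risking lock-order deadlocks against this object.
        cb.onFailure(e, this);
    }
}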
@Override
public void cancel(ClientContext context) {
    synchronized(this) {
        if(cancelled) return;
        cancelled = true;
    }
    // Must call onFailure so that we get removeFrom()'d.
    cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
}
@Override
public void onFailure(final InsertException e, ClientPutState c, ClientContext context) {
    if(persistent) {
        try {
            context.jobRunner.queue(new PersistentJob() {
                @Override
                public boolean run(ClientContext context) {
                    inserter.cb.onFailure(e, inserter, context);
                    return true;
                }
            }, NativeThread.NORM_PRIORITY + 1);
        } catch (PersistenceDisabledException e1) {
            // Can't do anything
        }
    } else {
        inserter.cb.onFailure(e, inserter, context);
    }
}
public ClientKeyBlock getBlock(ClientContext context, boolean calledByCB) {
    try {
        synchronized(this) {
            if(finished) return null;
        }
        return encode(context, calledByCB);
    } catch (InsertException e) {
        cb.onFailure(e, this, context);
        return null;
    } catch (Throwable t) {
        Logger.error(this, "Caught "+t, t);
        cb.onFailure(new InsertException(InsertExceptionMode.INTERNAL_ERROR, t, null), this, context);
        return null;
    }
}
void onCompressed(CompressionOutput output, ClientContext context) {
    synchronized(this) {
        if(started) {
            Logger.error(this, "Already started, not starting again", new Exception("error"));
            return;
        }
        if(cancelled) {
            Logger.error(this, "Already cancelled, not starting");
            return;
        }
    }
    try {
        onCompressedInner(output, context);
    } catch (InsertException e) {
        cb.onFailure(e, SingleFileInserter.this, context);
    } catch (Throwable t) {
        Logger.error(this, "Caught in OffThreadCompressor: "+t, t);
        System.err.println("Caught in OffThreadCompressor: "+t);
        t.printStackTrace();
        // Try to fail gracefully
        cb.onFailure(new InsertException(InsertExceptionMode.INTERNAL_ERROR, t, null), SingleFileInserter.this, context);
    }
}
@Override
public void cancel(ClientContext context) {
    if(logMINOR) Logger.minor(this, "Cancel "+this);
    synchronized(this) {
        if(cancelled) return;
        cancelled = true;
    }
    if(freeData) {
        block.free();
    }
    // Must call onFailure so that we get removeFrom()'d.
    cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
}
private void fail(final InsertException ie, ClientContext context, Bucket bestCompressedData) {
    if(persistent) {
        try {
            context.jobRunner.queue(new PersistentJob() {
                @Override
                public boolean run(ClientContext context) {
                    inserter.cb.onFailure(ie, inserter, context);
                    return true;
                }
            }, NativeThread.NORM_PRIORITY + 1);
        } catch (PersistenceDisabledException e1) {
            Logger.error(this, "Database disabled compressing data", new Exception("error"));
            if(bestCompressedData != null && bestCompressedData != origData)
                bestCompressedData.free();
        }
    } else {
        inserter.cb.onFailure(ie, inserter, context);
    }
}
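// Both fail paths above repeat the same persistent/transient dispatch: when
// persistent, the failure callback must run on the database job thread via a
// PersistentJob; otherwise it runs inline. A hedged sketch of that dispatch
// factored into one helper, assuming the same surrounding fields (persistent,
// inserter) and the jobRunner.queue() signature shown above; the helper name
// is hypothetical, not part of the real codebase.
private void runFailureCallback(final InsertException e, ClientContext context) {
    if(!persistent) {
        inserter.cb.onFailure(e, inserter, context);
        return;
    }
    try {
        context.jobRunner.queue(new PersistentJob() {
            @Override
            public boolean run(ClientContext context) {
                inserter.cb.onFailure(e, inserter, context);
                return true;
            }
        }, NativeThread.NORM_PRIORITY + 1);
    } catch (PersistenceDisabledException e1) {
        // Persistence is shutting down, so the queued callback can never run;
        // the caller must free any resources it still owns, as above.
    }
}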
@Override
public void cancel(ClientContext context) {
    synchronized(this) {
        if(finished) return;
        finished = true;
    }
    if(freeData) {
        sourceData.free();
        sourceData = null;
    }
    super.unregister(context, getPriorityClass());
    cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
}
@Override
public boolean run(ClientContext context) {
    unregisterSender();
    raf.close();
    raf.free();
    originalData.close();
    if(freeData) originalData.free();
    cb.onFailure(e, SplitFileInserter.this, context);
    return true;
}
});
@Override
public void cancel(ClientContext context) {
    USKFetcherTag tag;
    synchronized(this) {
        if(finished) return;
        finished = true;
        tag = fetcher;
        fetcher = null;
    }
    if(tag != null) {
        tag.cancel(context);
    }
    if(sbi != null) {
        sbi.cancel(context); // will call onFailure, which will removeFrom()
    }
    if(freeData) {
        if(data == null) {
            Logger.error(this, "data == null in cancel() on "+this, new Exception("error"));
        } else {
            data.free();
            synchronized(this) {
                data = null;
            }
        }
    }
    cb.onFailure(new InsertException(InsertExceptionMode.CANCELLED), this, context);
}
private void complete(InsertException e, ClientContext context) {
    synchronized(this) {
        if(finished) return;
        finished = true;
        if(e != null && this.e != null && this.e != e) {
            if(e.getMode() == InsertExceptionMode.CANCELLED) {
                // Cancelled is okay, ignore it; we sometimes cancel after a failure.
                // Ignore the new failure mode, use the old one.
                e = this.e;
                if(persistent) {
                    e = e.clone(); // Since we will remove it, we can't pass it on
                }
            } else {
                // Replace the old failure mode with the new one.
                this.e = e;
            }
        }
        if(e == null) {
            e = this.e;
            if(persistent && e != null) {
                e = e.clone(); // Since we will remove it, we can't pass it on
            }
        }
    }
    if(e != null)
        cb.onFailure(e, this, context);
    else
        cb.onSuccess(this, context);
}
@Override
public void onFailure(InsertException e, ClientPutState state, ClientContext context) {
    Bucket d = null;
    synchronized(this) {
        sbi = null;
        if(e.getMode() == InsertExceptionMode.COLLISION) {
            // Collided with an existing block in this slot: try the next one.
            edition++;
            consecutiveCollisions++;
            if(consecutiveCollisions > MAX_TRIED_SLOTS)
                scheduleFetcher(context);
            else
                scheduleInsert(context);
            return;
        }
        finished = true;
        if(freeData) {
            d = data;
            data = null;
        }
    }
    // Free the data and deliver the failure outside the monitor.
    if(d != null) d.free();
    cb.onFailure(e, state, context);
}
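// The COLLISION branch above implements a slot-probing policy: a collision
// means another insert already owns that USK edition, so advance to the next
// slot, and after MAX_TRIED_SLOTS consecutive collisions re-fetch the latest
// known slot instead of probing blindly. An illustrative, self-contained
// restatement of that policy as a loop; SlotProbe, trySlot(), and
// lookupLatestSlot() are hypothetical stand-ins, not Freenet code.
final class SlotProbe {
    static final int MAX_TRIED_SLOTS = 10; // assumption: illustrative value only
    long insertIntoFreeSlot(long edition) {
        int consecutiveCollisions = 0;
        while(!trySlot(edition)) {       // COLLISION: the slot is taken
            edition++;
            if(++consecutiveCollisions > MAX_TRIED_SLOTS) {
                // Too far behind: re-sync with the newest edition seen, as
                // scheduleFetcher() does, then resume probing from there.
                edition = Math.max(edition, lookupLatestSlot() + 1);
                consecutiveCollisions = 0;
            }
        }
        return edition; // the edition we successfully inserted at
    }
    boolean trySlot(long edition) { /* attempt the insert */ return true; }
    long lookupLatestSlot() { /* ask the USK manager */ return 0; }
}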
private void fail(InsertException e, boolean forceFatal, ClientContext context) {
    synchronized(this) {
        if(finished) return;
        finished = true;
    }
    if(e.isFatal() || forceFatal)
        parent.fatallyFailedBlock(context);
    else
        parent.failedBlock(context);
    unregister(context, getPriorityClass());
    if(freeData) {
        sourceData.free();
        sourceData = null;
    }
    cb.onFailure(e, this, context);
}
private void fail(InsertException e, ClientContext context) {
    if(logMINOR) Logger.minor(this, "Failing: "+e, e);
    ClientPutState oldSFI = null;
    ClientPutState oldMetadataPutter = null;
    synchronized(this) {
        if(finished) return;
        finished = true;
        oldSFI = sfi;
        oldMetadataPutter = metadataPutter;
    }
    if(oldSFI != null) oldSFI.cancel(context);
    if(oldMetadataPutter != null) oldMetadataPutter.cancel(context);
    synchronized(this) {
        if(freeData)
            block.free();
        else
            block.nullData();
    }
    cb.onFailure(e, this, context);
}
private void scheduleInsert(ClientContext context) {
    long edNo = Math.max(edition, context.uskManager.lookupLatestSlot(pubUSK) + 1);
    synchronized(this) {
        if(finished) return;
        edition = edNo;
        if(logMINOR) Logger.minor(this, "scheduling insert for "+pubUSK.getURI()+' '+edition);
        sbi = new SingleBlockInserter(parent, data, compressionCodec,
                privUSK.getInsertableSSK(edition).getInsertURI(),
                ctx, realTimeFlag, this, isMetadata, sourceLength, token,
                false, true /* we don't use it */, tokenObject, context,
                persistent, false, extraInserts, cryptoAlgorithm, forceCryptoKey);
    }
    try {
        sbi.schedule(context);
    } catch (InsertException e) {
        synchronized(this) {
            finished = true;
        }
        if(freeData) {
            data.free();
            synchronized(this) {
                data = null;
            }
        }
        cb.onFailure(e, this, context);
    }
}
Logger.error(this, "Unable to insert USK date hints due to disk I/O error: "+e, e); if(!added) { cb.onFailure(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, pubUSK.getSSK(edition).getURI()), this, context); return; Logger.error(this, "Unable to insert USK date hints due to error: "+e, e); if(!added) { cb.onFailure(e, this, context); return;