/**
 * Propagate the persistent-layer master secret to the client context and to both
 * persistent storage factories. The context's secret is only set if it does not
 * already have one; the factories are always updated.
 * @param persistentSecret The master secret protecting persistent temp storage.
 */
public void setupMasterSecret(MasterSecret persistentSecret) {
    // Never clobber a secret the context already holds.
    if (clientContext.getPersistentMasterSecret() == null) {
        clientContext.setPersistentMasterSecret(persistentSecret);
    }
    persistentTempBucketFactory.setMasterSecret(persistentSecret);
    persistentRAFFactory.setMasterSecret(persistentSecret);
}
private int resolve(MetadataUnresolvedException e, int x, FreenetURI key, String element2, ClientContext context) throws IOException { Metadata[] metas = e.mustResolve; for(Metadata m: metas) { try { Bucket bucket = m.toBucket(context.getBucketFactory(persistent)); String nameInArchive = ".metadata-"+(x++); containerItems.add(new ContainerElement(bucket, nameInArchive)); m.resolve(nameInArchive); } catch (MetadataUnresolvedException e1) { x = resolve(e, x, key, element2, context); } } return x; }
/**
 * Change the priority class of the request (request includes inserts here).
 * The old priority is captured and the field updated atomically under the monitor,
 * then every scheduler is asked to re-register so the request queues at its new level.
 * @param newPriorityClass The new priority class for the request or insert.
 * @param ctx The ClientContext, contains essential transient objects such as the schedulers.
 */
public void setPriorityClass(short newPriorityClass, ClientContext ctx) {
    final short oldPrio;
    synchronized(this) {
        oldPrio = priorityClass;
        priorityClass = newPriorityClass;
    }
    if(logMINOR)
        Logger.minor(this, "Changing priority class of "+this+" from "+oldPrio+" to "+newPriorityClass);
    // Notify all four schedulers; only the one actually holding this request will act,
    // but we cannot tell from here which that is.
    ctx.getChkFetchScheduler(realTimeFlag).reregisterAll(this, oldPrio);
    ctx.getChkInsertScheduler(realTimeFlag).reregisterAll(this, oldPrio);
    ctx.getSskFetchScheduler(realTimeFlag).reregisterAll(this, oldPrio);
    ctx.getSskInsertScheduler(realTimeFlag).reregisterAll(this, oldPrio);
}
/**
 * Pick the fetch scheduler matching this request's key type.
 * @param context Supplies the per-key-type, per-realtime-flag schedulers.
 * @return The SSK fetch scheduler when this is an SSK request, otherwise the CHK one.
 */
@Override
public ClientRequestScheduler getScheduler(ClientContext context) {
    return isSSK
            ? context.getSskFetchScheduler(realTimeFlag)
            : context.getChkFetchScheduler(realTimeFlag);
}
/**
 * Pick the insert scheduler appropriate for the concrete block type.
 * @param block The key block being inserted; must be a CHKBlock or an SSKBlock.
 * @param context Supplies the per-key-type insert schedulers.
 * @return The matching insert scheduler.
 * @throws IllegalArgumentException If the block is neither a CHK nor an SSK block.
 */
private ClientRequestScheduler getScheduler(KeyBlock block, ClientContext context) {
    if(block instanceof CHKBlock)
        return context.getChkInsertScheduler(realTimeFlag);
    if(block instanceof SSKBlock)
        return context.getSskInsertScheduler(realTimeFlag);
    throw new IllegalArgumentException("Unknown block type "+block.getClass()+" : "+block);
}
// NOTE(review): truncated excerpt of a serialization round-trip test — the
// ObjectOutputStream/ByteArrayOutputStream setup precedes this fragment and is
// not visible here.
oos.close();
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
// Bare ClientContext: every collaborator is null except the random source 'r'.
// Presumably only the master secret (set below) is consulted while deserializing
// the encrypted bucket — TODO confirm against EncryptedRandomAccessBucket.
ClientContext context = new ClientContext(0, null, null, null, null, null, null, null, null, null, r, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
context.setPersistentMasterSecret(secret);
ObjectInputStream ois = new ObjectInputStream(dis);
EncryptedRandomAccessBucket restored = (EncryptedRandomAccessBucket) ois.readObject();
// Insert failed: defer all teardown onto the (possibly persistent) job runner so
// the cleanup and the failure callback execute as a queued PersistentJob rather
// than on the caller's thread.
@Override
public void onFailed(final InsertException e) {
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {
        @Override
        public boolean run(ClientContext context) {
            unregisterSender();
            // Close before free; the splitfile's scratch RAF is always released.
            raf.close();
            raf.free();
            originalData.close();
            // The original data is only freed when this inserter owns it.
            if(freeData) originalData.free();
            cb.onFailure(e, SplitFileInserter.this, context);
            // NOTE(review): returning true presumably signals the job runner that
            // persistent state changed — TODO confirm PersistentJob.run contract.
            return true;
        }
    });
}
// NOTE(review): this excerpt is visibly truncated — the ClientContext constructor
// call below is missing its trailing arguments and closing parenthesis (the elided
// middle of the original line). Do not treat it as compilable as-is.
clientContext = new ClientContext(node.bootID, clientLayerPersister, node.executor, archiveManager, persistentTempBucketFactory, tempBucketFactory, clientContext.init(requestStarters, alerts);
// Bring up the FCP server and wire it in as the context's download cache.
FCPServer.maybeCreate(node, this, node.config, fcpPersistentRoot);
clientContext.setDownloadCache(fcpServer);
// Only load persisted FCP requests if the client database survived startup.
if (!killedDatabase()) fcpServer.load();
// NOTE(review): fragment — the locals used below (originalData, meta, ctx, hashes,
// topRequiredBlocks, ...) are declared outside this excerpt.
// Build the on-disk/in-memory storage for this splitfile insert. The scheduler's
// fetchingKeys() presumably lets the storage coordinate with keys already in
// flight — TODO confirm against SplitFileInserterStorage.
storage = new SplitFileInserterStorage(originalData, decompressedLength, this, compressionCodec, meta, isMetadata, archiveType, context.getRandomAccessBufferFactory(persistent), persistent, ctx, splitfileCryptoAlgorithm, splitfileCryptoKey, hashThisLayerOnly, hashes, context.getJobRunner(persistent), context.ticker, context.getChkInsertScheduler(realTime).fetchingKeys(), topDontCompress, topRequiredBlocks, topTotalBlocks, origDataSize, origCompressedDataSize);
// Blocks the storage requires beyond what the caller already counted as required.
int mustSucceed = storage.topRequiredBlocks - topRequiredBlocks;
// NOTE(review): fragment of a resume-after-restart path; 'raf' and 'originalData'
// are defined outside this excerpt.
originalData.onResume(context);
// Reconstruct the splitfile storage from its existing backing RAF; note this path
// always uses the persistent job runner (getJobRunner(true)).
this.storage = new SplitFileInserterStorage(raf, originalData, this, context.fastWeakRandom, context.memoryLimitedJobRunner, context.getJobRunner(true), context.ticker, context.getChkInsertScheduler(realTime).fetchingKeys(), context.persistentFG, context.persistentFileTracker, context.getPersistentMasterSecret());
storage.onResume(context);
this.sender = new SplitFileInserterSender(this, storage);
// NOTE(review): fragment — the enclosing if(...) and the tail of the anonymous
// PersistentJob below are outside this excerpt; braces are unbalanced here.
// Compress branch: hand the data to the background compressor.
InsertCompressor.start(context, this, origData, oneBlockCompressedSize, context.getBucketFactory(persistent), persistent, wantHashes, !atLeast1254);
} else {
    // Skip-compression branch: data is small enough (or otherwise exempt).
    if(logMINOR) Logger.minor(this, "Not compressing "+origData+" size = "+origSize+" block size = "+blockSize);
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {
/**
 * Register this fetch (and its single SendableGet, which is this object itself)
 * with the CHK fetch scheduler.
 * @param context Supplies the scheduler for our realtime/bulk class.
 * @param ignoreStore If true, don't check the datastore before re-registering the
 * requests to run. Should be true when rescheduling after a normal cooldown, false
 * after recovering from data corruption (the blocks may still be in the store),
 * false otherwise.
 */
public void schedule(ClientContext context, boolean ignoreStore) {
    BlockSet knownBlocks = parent.blockFetchContext.blocks;
    context.getChkFetchScheduler(realTimeFlag)
            .register(this, new SendableGet[] { this }, persistent, knownBlocks, ignoreStore);
}
// NOTE(review): heavily truncated excerpt — 'innerDIS' is used below but its
// declaration is not visible, the DataInputStream constructed mid-fragment has no
// visible receiver, and braces are unbalanced. This is an elided middle of a
// larger restore routine; do not treat it as compilable as-is.
fctx = context.getDefaultPersistentFetchContext();
extensionCheck = null;
// A boolean flag on the stream indicates whether initial metadata was persisted.
if(dis.readBoolean()) {
    initialMetadata = BucketTools.restoreFrom(dis, context.persistentFG, context.persistentFileTracker, context.getPersistentMasterSecret());
new DataInputStream(checker.checksumReaderWithLength(dis, context.tempBucketFactory, 65536));
try {
    returnBucketDirect = BucketTools.restoreFrom(innerDIS, context.persistentFG, context.persistentFileTracker, context.getPersistentMasterSecret());
} catch (IOException e) {
    // Restore of the completed data failed; caller falls back to restarting the fetch.
    Logger.error(this, "Failed to restore completed download-to-temp-space request, restarting instead");
// NOTE(review): fragment — these are the trailing arguments of a constructor call
// whose head (and enclosing try) are outside this excerpt. Persistent requests get
// the real job runner; transient ones the dummy runner.
persistent ? context.jobRunner : context.dummyJobRunner, context.ticker, context.memoryLimitedJobRunner, checker, persistent, fileCompleteViaTruncation, context.getFileRandomAccessBufferFactory(persistent), context.getChkFetchScheduler(realTimeFlag).fetchingKeys());
} catch (InsufficientDiskSpaceException e) {
    // Translate the low-level disk-space failure into the client-visible fetch error.
    // NOTE(review): the cause 'e' is dropped here — acceptable only if FetchException
    // cannot carry a cause; worth confirming.
    throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
// Test fixture constructor: builds a job runner over a stub executor/ticker,
// starts it against a minimal ClientContext, and drains startup work so each
// test begins from a quiescent, checkpoint-clean state.
public PersistentJobRunnerImplTest() {
    // 1000 is the runner's checkpoint interval argument — TODO confirm units (ms?).
    jobRunner = new JobRunner(exec, ticker, 1000);
    // Minimal ClientContext: every collaborator null except the executor and ticker,
    // which are all the job runner touches in these tests.
    context = new ClientContext(0, null, exec, null, null, null, null, null, null, null, null, ticker, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
    jobRunner.start(context);
    // onStarted(false): presumably "not resuming from an existing checkpoint".
    jobRunner.onStarted(false);
    // Wait for queued startup jobs to finish, then clear the checkpointed flag so
    // tests can assert on checkpoints they themselves trigger.
    exec.waitForIdle();
    jobRunner.grabHasCheckpointed();
}
/**
 * Create, register and start a persistent download on the global queue.
 * The request inherits localRequestOnly/ignoreStore from the node's default
 * persistent fetch context and runs at bulk-splitfile priority with the queue's
 * standard retry and size limits.
 * @param fetchURI The URI to fetch.
 * @param filterData Whether to run the content filter over the fetched data.
 * @param persistRebootOnly If true the request belongs to the reboot-only global
 * client; otherwise to the forever (disk-persisted) global client.
 * @param returnType How the data is returned to the requester.
 * @param id Request identifier.
 * @param returnFilename Target file when returning to disk, else null semantics per ClientGet.
 * @param realTimeFlag Realtime vs bulk scheduling class.
 * @throws IdentifierCollisionException If the id is already in use.
 * @throws NotAllowedException If the request is not permitted.
 * @throws IOException On storage errors while registering.
 */
private void innerMakePersistentGlobalRequest(FreenetURI fetchURI, boolean filterData, boolean persistRebootOnly, ReturnType returnType, String id, File returnFilename, boolean realTimeFlag) throws IdentifierCollisionException, NotAllowedException, IOException {
    FetchContext defaults = core.clientContext.getDefaultPersistentFetchContext();
    final ClientGet cg = new ClientGet(
            persistRebootOnly ? globalRebootClient : globalForeverClient,
            fetchURI,
            defaults.localRequestOnly,
            defaults.ignoreStore,
            filterData,
            QUEUE_MAX_RETRIES, QUEUE_MAX_RETRIES, QUEUE_MAX_DATA_SIZE,
            returnType, persistRebootOnly, id,
            Integer.MAX_VALUE,
            RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS,
            returnFilename, null, false, realTimeFlag, false, core);
    cg.register(false);
    cg.start(core.clientContext);
}
/**
 * Base constructor for FCP insert requests: delegates the common request fields to
 * the superclass, then clones the node's default persistent insert context and
 * overrides it with the per-request options supplied by the FCP client.
 * @throws MalformedURLException If a request URI cannot be derived from the insert URI.
 */
public ClientPutBase(FreenetURI uri, String identifier, int verbosity, String charset, FCPConnectionHandler handler, PersistentRequestClient client, short priorityClass, Persistence persistence, String clientToken, boolean global, boolean getCHKOnly, boolean dontCompress, int maxRetries, boolean earlyEncode, boolean canWriteClientCache, boolean forkOnCacheable, boolean localRequestOnly, int extraInsertsSingleBlock, int extraInsertsSplitfileHeader, boolean realTimeFlag, String compressorDescriptor, InsertContext.CompatibilityMode compatMode, boolean ignoreUSKDatehints, NodeClientCore core) throws MalformedURLException {
    super(uri, identifier, verbosity, charset, handler, client, priorityClass, persistence, realTimeFlag, clientToken, global);
    // Start from the node-wide defaults, then apply this request's overrides.
    ctx = core.clientContext.getDefaultPersistentInsertContext();
    ctx.getCHKOnly = getCHKOnly;
    ctx.dontCompress = dontCompress;
    // Subscribe this request to insert progress events.
    ctx.eventProducer.addEventListener(this);
    ctx.maxInsertRetries = maxRetries;
    ctx.canWriteClientCache = canWriteClientCache;
    ctx.compressorDescriptor = compressorDescriptor;
    ctx.forkOnCacheable = forkOnCacheable;
    ctx.extraInsertsSingleBlock = extraInsertsSingleBlock;
    ctx.extraInsertsSplitfileHeaderBlock = extraInsertsSplitfileHeader;
    ctx.localRequestOnly = localRequestOnly;
    ctx.setCompatibilityMode(compatMode);
    ctx.ignoreUSKDatehints = ignoreUSKDatehints;
    ctx.earlyEncode = earlyEncode;
    // The public (request) URI clients will use to fetch what this inserts.
    publicURI = this.uri.deriveRequestURIFromInsertURI();
}
/**
 * Ask whether any client request — realtime or bulk — currently wants this key.
 * @param key The key being offered; its concrete type selects SSK vs CHK schedulers.
 * @return true if either fetch scheduler for the key's type wants it.
 */
public boolean wantKey(Key key) {
    boolean isSSK = key instanceof NodeSSK;
    // Short-circuit || preserves the original "check realtime first" order.
    return clientContext.getFetchScheduler(isSSK, true).wantKey(key)
            || clientContext.getFetchScheduler(isSSK, false).wantKey(key);
}
public void schedule(ClientContext context) { if(getParentGrabArray() != null) return; // If change priority will unregister first. context.getChkInsertScheduler(parent.realTime).registerInsert(this, persistent); }
// NOTE(review): truncated excerpt of a BucketTools round-trip test — the
// DataOutputStream setup and 'buf' fixture precede this fragment.
dos.close();
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
// Bare ClientContext: all collaborators null except the random source 'r'.
ClientContext context = new ClientContext(0, null, null, null, null, null, null, null, null, null, r, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null);
context.setPersistentMasterSecret(secret);
// Restore the encrypted bucket via BucketTools (note: secret passed explicitly,
// independently of the one set on the context above).
EncryptedRandomAccessBucket restored = (EncryptedRandomAccessBucket) BucketTools.restoreFrom(dis, context.persistentFG, context.persistentFileTracker, secret);
assertEquals(buf.length, restored.size());