private int resolve(MetadataUnresolvedException e, int x, FreenetURI key, String element2, ClientContext context) throws IOException {
    Metadata[] metas = e.mustResolve;
    for(Metadata m: metas) {
        try {
            Bucket bucket = m.toBucket(context.getBucketFactory(persistent));
            String nameInArchive = ".metadata-"+(x++);
            containerItems.add(new ContainerElement(bucket, nameInArchive));
            m.resolve(nameInArchive);
        } catch (MetadataUnresolvedException e1) {
            // Recurse on the newly thrown exception, not the one already being processed,
            // otherwise the same metas are re-added and the recursion never terminates.
            x = resolve(e1, x, key, element2, context);
        }
    }
    return x;
}
/**
 * Start inserts for unresolved (too big) Metadatas. Eventually these will call back with an
 * onEncode(), meaning they have the CHK, and we can progress to resolveAndStartBase().
 * @param e
 * @param context
 * @throws InsertException
 * @throws IOException
 */
private void resolve(MetadataUnresolvedException e, ClientContext context) throws InsertException, IOException {
    new Error("RefactorME-resolve").printStackTrace();
    Metadata[] metas = e.mustResolve;
    for(Metadata m: metas) {
        if(logMINOR) Logger.minor(this, "Resolving "+m);
        if(m.isResolved()) {
            Logger.error(this, "Already resolved: "+m+" in resolve() - race condition???");
            continue;
        }
        try {
            MetaPutHandler ph = new MetaPutHandler(this, null, m, context.getBucketFactory(persistent()));
            ph.start(context);
        } catch (MetadataUnresolvedException e1) {
            resolve(e1, context);
        }
    }
}
private ClientGetter makeGetter(NodeClientCore core, Bucket ret) throws IOException {
    if (binaryBlob && ret == null) {
        ret = core.clientContext.getBucketFactory(persistence == Persistence.FOREVER).makeBucket(fctx.maxOutputLength);
    }
    // For binary blob fetches the returned data goes to a NullBucket and the
    // real bucket is handed to the BinaryBlobWriter instead.
    return new ClientGetter(this, uri, fctx, priorityClass,
            binaryBlob ? new NullBucket() : ret,
            binaryBlob ? new BinaryBlobWriter(ret) : null,
            false, initialMetadata, extensionCheck);
}
private void makeMetadata(ClientContext context) {
    Bucket bucket = null;
    int x = 0;
    Metadata md = makeManifest(origMetadata, "");
    // Keep retrying: each pass resolves any metadata that was too big to fit inline.
    while(true) {
        try {
            bucket = md.toBucket(context.getBucketFactory(persistent));
            containerItems.add(new ContainerElement(bucket, ".metadata"));
            return;
        } catch (MetadataUnresolvedException e) {
            try {
                x = resolve(e, x, null, null, context);
            } catch (IOException e1) {
                fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, e, null), context);
                return;
            }
        } catch (IOException e) {
            fail(new InsertException(InsertExceptionMode.INTERNAL_ERROR, e, null), context);
            return;
        }
    }
}
long maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
try {
    data = context.getBucketFactory(false).makeBucket(maxLen);
    output = data.getOutputStream();
    if(decompressors != null) {
data = context.getBucketFactory(persistent).makeBucket(maxLen);
output = data.getOutputStream();
if(decompressors != null) {
    this.targetURI = redirectTarget;
    Metadata m = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, targetURI, cm);
    tempData = m.toBucket(core.clientContext.getBucketFactory(isPersistentForever()));
    isMetadata = true;
} else
if(decode && block != null) {
    try {
        data = block.decode(context.getBucketFactory(parent.persistent()), 1025 /* it's an SSK */, true);
    } catch (KeyDecodeException e) {
        data = null;
    InsertCompressor.start(context, this, origData, oneBlockCompressedSize,
            context.getBucketFactory(persistent), persistent, wantHashes, !atLeast1254);
} else {
    if(logMINOR) Logger.minor(this, "Not compressing "+origData+" size = "+origSize+" block size = "+blockSize);
/** Convert a ClientKeyBlock to a Bucket. If an error occurs, report it via onFailure
 * and return null. */
protected Bucket extract(ClientKeyBlock block, ClientContext context) {
    Bucket data;
    try {
        data = block.decode(context.getBucketFactory(parent.persistent()),
                (int)(Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)), false);
    } catch (KeyDecodeException e1) {
        if(logMINOR) Logger.minor(this, "Decode failure: "+e1, e1);
        onFailure(new FetchException(FetchExceptionMode.BLOCK_DECODE_ERROR, e1.getMessage()), false, context);
        return null;
    } catch (TooBigException e) {
        onFailure(new FetchException(FetchExceptionMode.TOO_BIG, e), false, context);
        return null;
    } catch (InsufficientDiskSpaceException e) {
        onFailure(new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE), false, context);
        return null;
    } catch (IOException e) {
        Logger.error(this, "Could not capture data - disk full?: "+e, e);
        onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
        return null;
    }
    return data;
}
finalData = context.getBucketFactory(persistent).makeBucket(maxLen);
output = finalData.getOutputStream();
if(decompressors != null) {
Metadata m = new Metadata(DocumentType.SIMPLE_REDIRECT, null, null, targetURI, cm);
try {
    tempData = m.toBucket(server.core.clientContext.getBucketFactory(isPersistentForever()));
} catch (MetadataUnresolvedException e) {
long maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
try {
    finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
} catch (InsufficientDiskSpaceException e) {
    onFailure(new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE), state, context);
for(FreenetURI uri : hintURIs) {
    try {
        Bucket bucket = BucketTools.makeImmutableBucket(context.getBucketFactory(persistent), hintData);
        SingleBlockInserter sb = new SingleBlockInserter(parent, bucket, (short) -1, uri,
OutputStream os = null;
try {
    RandomAccessBucket outputBucket = context.getBucketFactory(persistent).makeBucket(-1);
    os = new BufferedOutputStream(outputBucket.getOutputStream());
    String mimeType = (archiveType == ARCHIVE_TYPE.TAR ?
bucket = baseMetadata.toBucket(context.getBucketFactory(persistent()));
if(logMINOR) Logger.minor(this, "Metadata bucket is "+bucket.size()+" bytes long");
    metadataBucket = BucketTools.makeImmutableBucket(context.getBucketFactory(persistent), metaBytes);
} catch (IOException e1) {
    InsertException ex = new InsertException(InsertExceptionMode.BUCKET_ERROR, e1, null);
if(returnBucket == null)
    finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
else
    finalResult = returnBucket;
if(logMINOR) Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
RandomAccessBucket metadataBucket;
try {
    metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
} catch (IOException e) {
    Logger.error(this, "Caught "+e, e);
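Every snippet above goes through the same idiom: ask the ClientContext for the persistent or transient BucketFactory, allocate a Bucket of bounded size, write into it, and make sure it does not leak on failure. Below is a minimal sketch of that pattern, not taken from any of the call sites above; it uses only the calls already visible in these excerpts (getBucketFactory, makeBucket, getOutputStream) plus Bucket.free(), which is assumed here for cleanup, and hypothetical local names (temp, maxLen).

// Sketch only: allocate a bounded temp bucket from the appropriate factory,
// write to it, and free it if anything goes wrong before it is handed off.
Bucket temp = context.getBucketFactory(persistent).makeBucket(maxLen);
OutputStream out = null;
try {
    out = temp.getOutputStream();
    // ... write decoded or decompressed data into the bucket ...
} catch (IOException e) {
    temp.free();   // do not leak the temporary bucket on failure
    throw e;
} finally {
    if(out != null) out.close();
}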