Examples of RandomAccessBucket


Examples of freenet.support.api.RandomAccessBucket

      System.out.println("SEED-TIME:" + (t2 - t1));
      csvLine.add(String.valueOf(t2 - t1));

      // Push one block only.
     
      RandomAccessBucket data = randomData(node);
      HighLevelSimpleClient client = node.clientCore.makeClient((short) 0, false, false);
      FreenetURI uri = new FreenetURI("KSK@" + uid + "-" + dateFormat.format(today.getTime()));
      System.out.println("PUSHING " + uri);
     
      try {
        InsertBlock block = new InsertBlock(data, new ClientMetadata(), uri);
        t1 = System.currentTimeMillis();
        client.insert(block, false, null);
        t2 = System.currentTimeMillis();
       
        System.out.println("PUSH-TIME-" + ":" + (t2 - t1));
        csvLine.add(String.valueOf(t2 - t1));
      } catch (InsertException e) {
        e.printStackTrace();
        csvLine.add("N/A");
      }

      data.free();

      node.park();

      // Node 2
      File innerDir2 = new File(dir, Integer.toString(DARKNET_PORT2));
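In the full test, a second node then pulls the same key back; the pull side mirrors the push above. A minimal sketch, not from the original source, assuming a HighLevelSimpleClient client on the fetching node and the uri, csvLine, t1 and t2 variables from the snippet above:

      try {
        t1 = System.currentTimeMillis();
        FetchResult result = client.fetch(uri); // blocking fetch of the KSK inserted above
        t2 = System.currentTimeMillis();
        System.out.println("PULL-TIME:" + (t2 - t1) + " (" + result.asBucket().size() + " bytes)");
        csvLine.add(String.valueOf(t2 - t1));
        result.asBucket().free();
      } catch (FetchException e) {
        e.printStackTrace();
        csvLine.add("N/A");
      }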

Examples of freenet.support.api.RandomAccessBucket

      System.exit(exitCode);
    }
  }

  private static RandomAccessBucket randomData(Node node) throws IOException {
    RandomAccessBucket data = node.clientCore.tempBucketFactory.makeBucket(TEST_SIZE);
    OutputStream os = data.getOutputStream();
    try {
      byte[] buf = new byte[4096];
      for (long written = 0; written < TEST_SIZE;) {
        node.fastWeakRandom.nextBytes(buf);
        int toWrite = (int) Math.min(TEST_SIZE - written, buf.length);
        // Write the chunk and advance until TEST_SIZE bytes have been written.
        os.write(buf, 0, toWrite);
        written += toWrite;
      }
    } finally {
      os.close();
    }
    return data;
  }
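Reading the bucket back is symmetrical to writing it. A minimal sketch, not from the original source, assuming a filled RandomAccessBucket data as returned by randomData():

    InputStream is = data.getInputStream();
    try {
      byte[] chunk = new byte[4096];
      long total = 0;
      int r;
      while ((r = is.read(chunk)) > 0)
        total += r;
      // total now equals data.size(), the number of bytes written above
    } finally {
      is.close();
    }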

Examples of freenet.support.api.RandomAccessBucket

   * @param container
   * @param context
   */
  private void resolveAndStartBase(ClientContext context) {
    //new Error("DEBUG_ME_resolveAndStartBase").printStackTrace();
    RandomAccessBucket bucket = null;
    synchronized(this) {
      if(hasResolvedBase) return;
    }
    while(true) {
      try {
        bucket = baseMetadata.toBucket(context.getBucketFactory(persistent()));
        if(logMINOR)
          Logger.minor(this, "Metadata bucket is "+bucket.size()+" bytes long");
        break;
      } catch (IOException e) {
        fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null), context);
        return;
      } catch (MetadataUnresolvedException e) {

Examples of freenet.support.api.RandomAccessBucket

            String name = entry.getKey();
            Object o = entry.getValue();
            if(o instanceof ManifestElement) {
                manifestEntries.put(name, o);
            } else if(o instanceof Bucket) {
                RandomAccessBucket data = (RandomAccessBucket) o;
                manifestEntries.put(name, new ManifestElement(name, data, null,data.size()));
            } else if(o instanceof HashMap) {
                manifestEntries.put(name, bucketsByNameToManifestEntries(Metadata.forceMap(o)));
            } else
                throw new IllegalArgumentException(String.valueOf(o));
        }
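For context, the map handed to this conversion may mix plain buckets, ready-made ManifestElement instances and nested HashMaps (which become subdirectories). A rough caller-side sketch, not from the original source; the bf (BucketFactory) and pageBytes names are illustrative:

            HashMap<String, Object> bucketsByName = new HashMap<String, Object>();
            byte[] pageBytes = "<html><body>hello</body></html>".getBytes(StandardCharsets.UTF_8);
            RandomAccessBucket page = bf.makeBucket(pageBytes.length);
            OutputStream os = page.getOutputStream();
            try {
                os.write(pageBytes);
            } finally {
                os.close();
            }
            bucketsByName.put("index.html", page);                       // wrapped into a ManifestElement above
            bucketsByName.put("images", new HashMap<String, Object>());  // nested map = subdirectory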

Examples of freenet.support.api.RandomAccessBucket

    }

    // resolver
    private MetaPutHandler(BaseManifestPutter smp, PutHandler parent, Metadata toResolve, BucketFactory bf) throws MetadataUnresolvedException, IOException {
      super(smp, parent, null, null, runningPutHandlers);
      RandomAccessBucket b = toResolve.toBucket(bf);
      metadata = toResolve;
      // Treat as splitfile for purposes of determining number of reinserts.
      InsertBlock ib = new InsertBlock(b, null, FreenetURI.EMPTY_CHK_URI);
      this.origSFI = new SingleFileInserter(this, this, ib, true, ctx, realTimeFlag, false, false, toResolve, null, true, null, true, persistent(), 0, 0, null, cryptoAlgorithm, null, -1);
      if(logMINOR) Logger.minor(this, "Inserting subsidiary metadata: "+origSFI+" for "+toResolve);

Examples of freenet.support.api.RandomAccessBucket

      // So it is passed on.
      origHashes = hashes;
    } else {
      hashes = origHashes; // Inherit so it goes all the way to the top.
    }
    RandomAccessBucket bestCompressedData = output.data;
    long bestCompressedDataSize = bestCompressedData.size();
    RandomAccessBucket data = bestCompressedData;
    COMPRESSOR_TYPE bestCodec = output.bestCodec;
   
    boolean shouldFreeData = freeData;
    if(bestCodec != null) {
      if(logMINOR) Logger.minor(this, "The best compression algorithm is "+bestCodec+ " we have gained"+ (100-(bestCompressedDataSize*100/origSize)) +"% ! ("+origSize+'/'+bestCompressedDataSize+')');
      shouldFreeData = true; // must be freed regardless of whether the original data was to be freed
      if(freeData) {
        block.getData().free();
      }
      block.nullData();
    } else {
      data = block.getData();
      bestCompressedDataSize = origSize;
    }

    int blockSize;
    int oneBlockCompressedSize;
   
    boolean isCHK = false;
    String type = block.desiredURI.getKeyType();
    boolean isUSK = false;
    if(type.equals("SSK") || type.equals("KSK") || (isUSK = type.equals("USK"))) {
      blockSize = SSKBlock.DATA_LENGTH;
      oneBlockCompressedSize = SSKBlock.MAX_COMPRESSED_DATA_LENGTH;
    } else if(type.equals("CHK")) {
      blockSize = CHKBlock.DATA_LENGTH;
      oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
      isCHK = true;
    } else {
      throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
    }
   
    // Compressed data ; now insert it
    // We do NOT need to switch threads here: the actual compression is done by InsertCompressor on the RealCompressor thread,
    // which then switches either to the database thread or to a new executable to run this method.
   
    if(parent == cb) {
      short codecID = bestCodec == null ? -1 : bestCodec.metadataID;
      ctx.eventProducer.produceEvent(new FinishedCompressionEvent(codecID, origSize, bestCompressedDataSize), context);
      if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this+" data = "+data);
    }
   
    // Insert it...
    short codecNumber = bestCodec == null ? -1 : bestCodec.metadataID;
    long compressedDataSize = data.size();
    boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize <= blockSize : compressedDataSize <= oneBlockCompressedSize;
    boolean fitsInOneCHK = bestCodec == null ? compressedDataSize <= CHKBlock.DATA_LENGTH : compressedDataSize <= CHKBlock.MAX_COMPRESSED_DATA_LENGTH;

    if((fitsInOneBlockAsIs || fitsInOneCHK) && origSize > Integer.MAX_VALUE)
      throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "2GB+ should not encode to one block!", null);

    boolean noMetadata = ((block.clientMetadata == null) || block.clientMetadata.isTrivial()) && targetFilename == null;
    if((noMetadata || metadata) && archiveType == null) {
      if(fitsInOneBlockAsIs) {
        if(persistent && (data instanceof NotPersistentBucket))
          data = fixNotPersistent(data, context);
        // Just insert it
        ClientPutState bi =
          createInserter(parent, data, codecNumber, ctx, cb, metadata, (int)origSize, -1, true, context, shouldFreeData, forSplitfile);
        if(logMINOR)
          Logger.minor(this, "Inserting without metadata: "+bi+" for "+this);
        cb.onTransition(this, bi, context);
        if(ctx.earlyEncode && bi instanceof SingleBlockInserter && isCHK)
          ((SingleBlockInserter)bi).getBlock(context, true);
        bi.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        synchronized(this) {
            started = true;
        }
        if(persistent) {
          block.nullData();
          block = null;
        }
        return;
      }
    }
    if (fitsInOneCHK) {
      // Insert single block, then insert pointer to it
      if(persistent && (data instanceof NotPersistentBucket)) {
        data = fixNotPersistent(data, context);
      }
      if(reportMetadataOnly) {
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, cb, metadata, (int)origSize, -1, true, true, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        cb.onMetadata(meta, this, context);
        cb.onTransition(this, dataPutter, context);
        dataPutter.schedule(context);
        if(!isUSK)
          cb.onBlockSetFinished(this, context);
        synchronized(this) {
          // Don't delete them because they are being passed on.
          origHashes = null;
        }
      } else {
        MultiPutCompletionCallback mcb =
          new MultiPutCompletionCallback(cb, parent, token, persistent, false, ctx.earlyEncode);
        SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, mcb, metadata, (int)origSize, -1, true, false, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
        if(logMINOR)
          Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
        Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
        RandomAccessBucket metadataBucket;
        try {
          metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
        } catch (IOException e) {
          Logger.error(this, "Caught "+e, e);
          throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
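The key-type branching above reduces to a single size test against the relevant block constants. A condensed restatement as a sketch, not from the original source; the helper name fitsInOneBlock is hypothetical:

    // Hypothetical helper: does a (possibly compressed) payload of this size fit in one block?
    static boolean fitsInOneBlock(String keyType, long size, boolean compressed) {
      if(keyType.equals("CHK"))
        return size <= (compressed ? CHKBlock.MAX_COMPRESSED_DATA_LENGTH : CHKBlock.DATA_LENGTH);
      // SSK, KSK and USK keys all share the SSK block format.
      return size <= (compressed ? SSKBlock.MAX_COMPRESSED_DATA_LENGTH : SSKBlock.DATA_LENGTH);
    }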

Examples of freenet.support.api.RandomAccessBucket

  private RandomAccessBucket fixNotPersistent(RandomAccessBucket data, ClientContext context) throws InsertException {
    boolean skip = false;
    try {
      if(!skip) {
      if(logMINOR) Logger.minor(this, "Copying data from "+data+" length "+data.size());
      RandomAccessBucket newData = context.persistentBucketFactory.makeBucket(data.size());
      BucketTools.copy(data, newData);
      data.free();
      data = newData;
      }
    } catch (IOException e) {

Examples of freenet.support.api.RandomAccessBucket

    return data;
  }

  private void tryCompress(ClientContext context) throws InsertException {
    // First, determine how small it needs to be
    RandomAccessBucket origData = block.getData();
    RandomAccessBucket data = origData;
    int blockSize;
    int oneBlockCompressedSize;
    boolean dontCompress = ctx.dontCompress;
   
    long origSize = data.size();
    String type = block.desiredURI.getKeyType().toUpperCase();
    if(type.equals("SSK") || type.equals("KSK") || type.equals("USK")) {
      blockSize = SSKBlock.DATA_LENGTH;
      oneBlockCompressedSize = SSKBlock.MAX_COMPRESSED_DATA_LENGTH;
    } else if(type.equals("CHK")) {
      blockSize = CHKBlock.DATA_LENGTH;
      oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
    } else {
      throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
    }
   
    // We always want SHA256, even for small files.
    long wantHashes = 0;
    CompatibilityMode cmode = ctx.getCompatibilityMode();
    boolean atLeast1254 = (cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal());
    if(atLeast1254) {
      // We verify this. We want it for *all* files.
      wantHashes |= HashType.SHA256.bitmask;
      // FIXME: If the user requests it, calculate the others for small files.
      // FIXME maybe the thresholds should be configurable.
      if(data.size() >= 1024*1024 && !metadata) {
        // SHA1 is common and MD5 is cheap.
        wantHashes |= HashType.SHA1.bitmask;
        wantHashes |= HashType.MD5.bitmask;
      }
      if(data.size() >= 4*1024*1024 && !metadata) {
        // Useful for cross-network, and cheap.
        wantHashes |= HashType.ED2K.bitmask;
        // Very widely supported for cross-network.
        wantHashes |= HashType.TTH.bitmask;
        // For completeness.
        wantHashes |= HashType.SHA512.bitmask;
      }
    }
    boolean tryCompress = (origSize > blockSize) && (!ctx.dontCompress) && (!dontCompress);
    if(tryCompress) {
      InsertCompressor.start(context, this, origData, oneBlockCompressedSize, context.getBucketFactory(persistent), persistent, wantHashes, !atLeast1254);
    } else {
      if(logMINOR) Logger.minor(this, "Not compressing "+origData+" size = "+origSize+" block size = "+blockSize);
      HashResult[] hashes = null;
      if(wantHashes != 0) {
        // Need to get the hashes anyway
        NullOutputStream nos = new NullOutputStream();
        MultiHashOutputStream hasher = new MultiHashOutputStream(nos, wantHashes);
        try {
          BucketTools.copyTo(data, hasher, data.size());
        } catch (IOException e) {
          throw new InsertException(InsertExceptionMode.BUCKET_ERROR, "I/O error generating hashes", e, null);
        }
        hashes = hasher.getResults();
      }
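The hash-generation pattern at the end of the snippet also works on its own. A minimal sketch, not from the original source, assuming a RandomAccessBucket data and the same HashType, MultiHashOutputStream, NullOutputStream and BucketTools classes used above:

      long wantHashes = HashType.SHA256.bitmask | HashType.SHA1.bitmask | HashType.MD5.bitmask;
      NullOutputStream nos = new NullOutputStream();
      MultiHashOutputStream hasher = new MultiHashOutputStream(nos, wantHashes);
      BucketTools.copyTo(data, hasher, data.size());   // throws IOException; streams the bucket through the hasher
      HashResult[] hashes = hasher.getResults();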

Examples of freenet.support.api.RandomAccessBucket

            return;
          }
        }
      }
     
      RandomAccessBucket metadataBucket;
      try {
        metadataBucket = BucketTools.makeImmutableBucket(context.getBucketFactory(persistent), metaBytes);
      } catch (IOException e1) {
        InsertException ex = new InsertException(InsertExceptionMode.BUCKET_ERROR, e1, null);
        fail(ex, context);
        return;
      }
      ClientMetadata m = meta.getClientMetadata();
      CompatibilityMode cmode = ctx.getCompatibilityMode();
      if(!(cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()))
        m = null;
      if(metadataThreshold > 0 && metaBytes.length < metadataThreshold) {
        // FIXME what to do about m ???
        // I.e. do the other layers of metadata already include the content type?
        // It's probably already included in the splitfile, but need to check that, and test it.
        synchronized(this) {
          metaInsertSuccess = true;
        }
        cb.onMetadata(metadataBucket, state, context);
        return;
      }
      InsertBlock newBlock = new InsertBlock(metadataBucket, m, block.desiredURI);
      synchronized(this) {
          // Only the bottom layer in a multi-level splitfile pyramid has randomised keys. The rest are unpredictable anyway, and this ensures we only need to supply one key when reinserting.
          metadataPutter = new SingleFileInserter(parent, this, newBlock, true, ctx, realTimeFlag, false, false, token, archiveType, true, metaPutterTargetFilename, true, persistent, origDataLength, origCompressedDataLength, origHashes, cryptoAlgorithm, forceCryptoKey, metadataThreshold);
          if(origHashes != null) {
              // It gets passed on, and the last one deletes it.
              SingleFileInserter.this.origHashes = null;
          }
          // If EarlyEncode, then start the metadata insert ASAP, to get the key.
          // Otherwise, wait until the data is fetchable (to improve persistence).
          if(logMINOR)
              Logger.minor(this, "Created metadata putter for "+this+" : "+metadataPutter+" bucket "+metadataBucket+" size "+metadataBucket.size());
          if(!(ctx.earlyEncode || splitInsertSuccess)) return;
      }
      if(logMINOR) Logger.minor(this, "Putting metadata on "+metadataPutter+" from "+sfi+" ("+((SplitFileInserter)sfi).getLength()+ ')');
      if(!startMetadata(context)) {
        Logger.error(this, "onMetadata() yet unable to start metadata due to not having all URIs?!?!");

Examples of freenet.support.api.RandomAccessBucket

        if(insertURI.getKeyType().equals("CHK") || keyType.equals("SSK"))
          fnam = file.getFilename();
        else
          fnam = null;
        /* copy bucket data */
        final RandomAccessBucket copiedBucket = core.persistentTempBucketFactory.makeBucket(file.getData().size());
        BucketTools.copy(file.getData(), copiedBucket);
        final CountDownLatch done = new CountDownLatch(1);
        try {
          core.clientLayerPersister.queue(new PersistentJob() {
