package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream.readInt()


    in = fs.open(idsPath);
    l = in.readInt();
    ids = new int[l];
    for (int i = 0; i < l; i++) {
      ids[i] = in.readInt();
    }
    in.close();

    in = fs.open(idToTermPath);
    l = in.readInt();
View Full Code Here


    docCount = 0;

    FSDataInputStream in = fs.open(file);

    // The docno offset.
    docnoOffset = in.readInt();

    // The size of the document collection.
    int sz = in.readInt() + 1;

    LOG.info("Docno offset: " + docnoOffset);
View Full Code Here

    // The docno offset.
    docnoOffset = in.readInt();

    // The size of the document collection.
    int sz = in.readInt() + 1;

    LOG.info("Docno offset: " + docnoOffset);
    LOG.info("Number of docs: " + (sz - 1));

    // Initialize an array to hold all the doc lengths.
View Full Code Here

      ids[i] = in.readInt();
    }
    in.close();

    in = fs.open(idToTermPath);
    l = in.readInt();
    idsToTerm = new int[l];
    for (int i = 0; i < l; i++) {
      idsToTerm[i] = in.readInt();
    }
    in.close();
View Full Code Here

    // Initialize an array to hold all the doc lengths.
    lengths = new short[sz];

    // Read each doc length.
    for (int i = 1; i < sz; i++) {
      int l = in.readInt();
      docLengthSum += l;

      lengths[i] = l > (Short.MAX_VALUE - Short.MIN_VALUE) ? Short.MAX_VALUE
          : (short) (l + Short.MIN_VALUE);
      docCount++;
View Full Code Here

    in = fs.open(idToTermPath);
    l = in.readInt();
    idsToTerm = new int[l];
    for (int i = 0; i < l; i++) {
      idsToTerm[i] = in.readInt();
    }
    in.close();
  }

  @Override
View Full Code Here

    String forwardIndexPath = (weighted ? env.getWeightedIntDocVectorsForwardIndex()
        : env.getIntDocVectorsForwardIndex());
    FSDataInputStream posInput = fs.open(new Path(forwardIndexPath));

    docnoOffset = posInput.readInt();
    collectionDocumentCount = posInput.readInt();

    positions = new long[collectionDocumentCount];
    for (int i = 0; i < collectionDocumentCount; i++) {
      positions[i] = posInput.readLong();
View Full Code Here

    String forwardIndexPath = (weighted ? env.getWeightedIntDocVectorsForwardIndex()
        : env.getIntDocVectorsForwardIndex());
    FSDataInputStream posInput = fs.open(new Path(forwardIndexPath));

    docnoOffset = posInput.readInt();
    collectionDocumentCount = posInput.readInt();

    positions = new long[collectionDocumentCount];
    for (int i = 0; i < collectionDocumentCount; i++) {
      positions[i] = posInput.readLong();
    }
View Full Code Here

    RetrievalEnvironment env = new RetrievalEnvironment(indexPath, fs);
    path = env.getTermDocVectorsDirectory();

    FSDataInputStream posInput = fs.open(new Path(env.getTermDocVectorsForwardIndex()));

    docnoOffset = posInput.readInt();
    collectionDocumentCount = posInput.readInt();

    positions = new long[collectionDocumentCount];
    for (int i = 0; i < collectionDocumentCount; i++) {
      positions[i] = posInput.readLong();
View Full Code Here

    path = env.getTermDocVectorsDirectory();

    FSDataInputStream posInput = fs.open(new Path(env.getTermDocVectorsForwardIndex()));

    docnoOffset = posInput.readInt();
    collectionDocumentCount = posInput.readInt();

    positions = new long[collectionDocumentCount];
    for (int i = 0; i < collectionDocumentCount; i++) {
      positions[i] = posInput.readLong();
    }
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.