Package: com.facebook.hive.orc.OrcProto

Examples of com.facebook.hive.orc.OrcProto.RowIndexEntry


      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here


      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

      // Partition the stream's row-index entries into "chunks": contiguous byte ranges of
      // the compressed stream. Every `readStrides` distinct stride start positions are
      // merged into one chunk, and chunkStarts records each chunk's starting file offset.
      // NOTE(review): numChunks, chunkStarts, maxLength, compressedStrides,
      // compressedIndeces, uncompressedIndeces, rowIndexEntries, startIndex, readStrides,
      // base and limit are declared outside this fragment -- confirm their types and
      // initialization in the enclosing method/class.
      int length = 0;
      int i = 0;
      // The first chunk always begins at the stream's base file offset.
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      // Number of distinct stride starts seen since the last chunk boundary was emitted.
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        // Position `startIndex` is presumably the stride's offset within the compressed
        // stream (relative to base) -- TODO confirm against the ORC row-index layout.
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          // Cut a new chunk only once readStrides distinct stride starts have accumulated.
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        // Per stride, record the chunk it falls into and its raw index positions.
        // Position startIndex + 1 presumably holds the offset within the uncompressed
        // data of the chunk -- verify against the writer's index layout.
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

TOP

Related Classes of com.facebook.hive.orc.OrcProto.RowIndexEntry

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.