Examples of RowIndexEntry


Examples of com.facebook.hive.orc.OrcProto.RowIndexEntry

      int length = 0;
      int i = 0;
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

Examples of com.facebook.hive.orc.OrcProto.RowIndexEntry

      int length = 0;
      int i = 0;
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

Examples of com.facebook.hive.orc.OrcProto.RowIndexEntry

      int length = 0;
      int i = 0;
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

Examples of com.facebook.hive.orc.OrcProto.RowIndexEntry

      int length = 0;
      int i = 0;
      numChunks = 1;
      chunkStarts[0] = base;
      int compressedIndex;
      RowIndexEntry rowIndexEntry;
      int distinctStrides = 0;
      int previousStrideStart = 0;
      for (i = 0; i < rowIndexEntries.size(); i++) {
        rowIndexEntry = rowIndexEntries.get(i);
        compressedIndex = (int) rowIndexEntry.getPositions(startIndex);
        // chunkStarts contains unique values of the compressedIndex
        // note that base + compressedIndex = the file offset, and chunkStarts contains file
        // offsets
        if (compressedIndex != previousStrideStart) {
          previousStrideStart = compressedIndex;
          distinctStrides++;
          if (distinctStrides == readStrides) {
            // If the compressedIndex is new (should be monotonically increasing)
            // convert it to a file offset
            chunkStarts[numChunks] = base + compressedIndex;

            // the length of the previous chunk
            length = (int) (chunkStarts[numChunks] - chunkStarts[numChunks - 1]);
            // update max length if necessary
            maxLength = maxLength < length ? length : maxLength;
            numChunks++;
            distinctStrides = 0;
          }
        }
        compressedStrides[i] = numChunks - 1;
        compressedIndeces[i] = compressedIndex;
        uncompressedIndeces[i] = (int) rowIndexEntry.getPositions(startIndex + 1);
      }

      // The final value in chunkStarts is the offset of the end of the stream data
      chunkStarts[numChunks] = base + limit;
      // Compute the length of the final stride
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

                AbstractCompactedRow row = nni.next();
                if (row.isEmpty())
                    continue;

                RowIndexEntry indexEntry = writer.append(row);
                totalkeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : toCompact)
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

                {
                    row.close();
                    continue;
                }

                RowIndexEntry indexEntry = writer.append(row);
                totalkeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : toCompact)
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

    public class KeyCacheSerializer implements CacheSerializer<KeyCacheKey, RowIndexEntry>
    {
        public void serialize(KeyCacheKey key, DataOutput out) throws IOException
        {
            RowIndexEntry entry = CacheService.instance.keyCache.get(key);
            if (entry == null)
                return;
            ByteBufferUtil.writeWithLength(key.key, out);
            Descriptor desc = key.desc;
            out.writeInt(desc.generation);
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

            if (reader == null)
            {
                RowIndexEntry.serializer.skipPromotedIndex(input);
                return null;
            }
            RowIndexEntry entry = RowIndexEntry.serializer.deserialize(input, reader.descriptor.version);
            return Futures.immediateFuture(Pair.create(new KeyCacheKey(reader.descriptor, key), entry));
        }
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

            {
                DecoratedKey dk = cfs.partitioner.decorateKey(key);

                for (SSTableReader sstable : cfs.getSSTables())
                {
                    RowIndexEntry entry = sstable.getPosition(dk, Operator.EQ, false);
                    if (entry != null)
                        keyCache.put(new KeyCacheKey(sstable.descriptor, key), entry);
                }
            }
        }
View Full Code Here

Examples of org.apache.cassandra.db.RowIndexEntry

            long indexPosition;
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                ByteBuffer key = ByteBufferUtil.readWithShortLength(primaryIndex);
                RowIndexEntry indexEntry = RowIndexEntry.serializer.deserialize(primaryIndex, descriptor.version);
                DecoratedKey decoratedKey = partitioner.decorateKey(key);
                if (first == null)
                    first = decoratedKey;
                last = decoratedKey;
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.