Package org.apache.hadoop.hive.serde2.columnar

Examples of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable
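
BytesRefWritable holds a (data, start, length) reference into a byte buffer, usually shared, and BytesRefArrayWritable groups one such reference per column to represent a single RCFile row. Before the examples below, here is a minimal, hedged sketch of the basic API (getBytesCopy() materializes the referenced range; everything else is construction and bookkeeping):

    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

    public class BytesRefWritableSketch {
      public static void main(String[] args) throws Exception {
        byte[] buffer = "hello\tworld".getBytes("UTF-8");

        // A BytesRefWritable references (data, start, length); it does not
        // copy the bytes it points at.
        BytesRefWritable ref = new BytesRefWritable(buffer, 0, 5);    // "hello"
        System.out.println(ref.getLength());                          // 5
        System.out.println(new String(ref.getBytesCopy(), "UTF-8")); // copies out "hello"

        // A BytesRefArrayWritable holds one reference per column of a row.
        BytesRefArrayWritable row = new BytesRefArrayWritable(2);
        row.set(0, ref);
        row.set(1, new BytesRefWritable(buffer, 6, 5));               // "world"
        System.out.println(row.size());                               // 2
      }
    }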


    // Apparently from an RCFile write benchmark: one reusable BytesRefWritable
    // per column is allocated up front, then refilled with random data per row.
    byte[][] columnRandom;

    BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
    columnRandom = new byte[columnNum][];
    for (int i = 0; i < columnNum; i++) {
      BytesRefWritable cu = new BytesRefWritable();
      bytes.set(i, cu);
    }

    for (int i = 0; i < rowCount; i++) {
      nextRandomRow(columnRandom, bytes);
      // ... (the populated row is appended to the writer; elided in the original)
    }
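The snippet above elides the writer setup and the append call. A hedged sketch of the full write path, assuming the classic RCFile.Writer API and the RCFile.COLUMN_NUMBER_CONF_STR configuration key (the output path and cell contents are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.RCFile;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

    public class RCFileWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int columnNum = 3;
        // The writer must know the column count before it is created.
        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, columnNum);

        FileSystem fs = FileSystem.getLocal(conf);
        RCFile.Writer writer = new RCFile.Writer(fs, conf, new Path("/tmp/sketch.rc"));

        BytesRefArrayWritable row = new BytesRefArrayWritable(columnNum);
        for (int c = 0; c < columnNum; c++) {
          byte[] cell = ("col" + c).getBytes("UTF-8");
          row.set(c, new BytesRefWritable(cell, 0, cell.length));
        }
        writer.append(row); // buffered column-wise until a row group is flushed
        writer.close();
      }
    }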


            "Currently the writer can only accept BytesRefArrayWritable");

      BytesRefArrayWritable columns = (BytesRefArrayWritable) val;
      int size = columns.size();
      for (int i = 0; i < size; i++) {
        BytesRefWritable cu = columns.get(i);
        int plainLen = cu.getLength();
        columnBufferSize += plainLen;
        columnValuePlainLength[i] += plainLen;
        columnBuffers[i].append(cu);
      }

      // Column fetch: per-cell lengths are stored as vints in the row group's
      // key buffer; each cell becomes a reference into the column's value buffer.
      fetchColumnTempBuf.reset(currentKey.allCellValLenBuffer[columnID]
          .getData(), currentKey.allCellValLenBuffer[columnID].getLength());
      for (int i = 0; i < recordsNumInValBuffer; i++) {
        int length = WritableUtils.readVInt(fetchColumnTempBuf);

        BytesRefWritable currentCell = rest.get(i);
        // Point the cell at its slice of the shared value buffer; no copy is made.
        currentCell.set(currentValue.loadedColumnsValueBuffer[columnID]
            .getData(), columnNextRowStart, length);
        columnNextRowStart = columnNextRowStart + length;
      }
      return rest;
    }

      // DataOutputStream to BytesWritable

      // Materialize the current row: skipped columns get a shared zero-length
      // reference, read columns become references into the loaded value buffers.
      ret.resetValid(columnNumber);

      for (int i = 0, readIndex = 0; i < columnNumber; i++) {
        BytesRefWritable ref = ret.unCheckedGet(i);

        if (skippedColIDs[i]) {
          if (ref != BytesRefWritable.ZeroBytesRefWritable) {
            ret.set(i, BytesRefWritable.ZeroBytesRefWritable);
          }
          continue;
        }

        int columnCurrentRowStart = (int) columnRowReadIndex[i];
        int length = (int) WritableUtils.readVLong(colValLenBufferReadIn[i]);
        columnRowReadIndex[i] = columnCurrentRowStart + length;

        ref.set(currentValue.loadedColumnsValueBuffer[readIndex].getData(),
            columnCurrentRowStart, length);
        readIndex++;
      }
      rowFetched = true;
    }
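The skippedColIDs array above is driven by column projection configured before the reader is opened. A hedged sketch, assuming the classic ColumnProjectionUtils.setReadColumnIDs(Configuration, List&lt;Integer&gt;) helper from org.apache.hadoop.hive.serde2 (later Hive versions replace it with appendReadColumns()):

    import java.util.ArrayList;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;

    public class ProjectionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Materialize only columns 0 and 2; every other column comes back as
        // the shared zero-length BytesRefWritable.ZeroBytesRefWritable.
        ArrayList<Integer> readCols = new ArrayList<Integer>();
        readCols.add(0);
        readCols.add(2);
        ColumnProjectionUtils.setReadColumnIDs(conf, readCols);
        // ... open the RCFile.Reader with this conf ...
      }
    }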

  /**
   * Prints one record (row) into the given buffer, with columns separated by
   * tabs and the row terminated by a newline.
   */
  private void printRecord(BytesRefArrayWritable value, StringBuilder buf)
      throws IOException {
    int n = value.size();
    if (n > 0) {
      BytesRefWritable v = value.unCheckedGet(0);
      ByteBuffer bb = ByteBuffer.wrap(v.getData(), v.getStart(), v.getLength());
      buf.append(decoder.decode(bb));
      for (int i = 1; i < n; i++) {
        // Starting at column 1 and prepending the TAB avoids a trailing TAB
        // after the last column.
        buf.append(RCFileCat.TAB);

        v = value.unCheckedGet(i);
        bb = ByteBuffer.wrap(v.getData(), v.getStart(), v.getLength());
        buf.append(decoder.decode(bb));
      }
      buf.append(RCFileCat.NEWLINE);
    }
  }
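The same wrap-and-decode pattern works on any BytesRefWritable: wrapping (data, start, length) in a ByteBuffer lets a CharsetDecoder read the referenced range without copying it first. A small illustrative sketch (charset choice and input bytes are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.charset.CharsetDecoder;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

    public class DecodeSketch {
      public static void main(String[] args) throws Exception {
        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder();
        byte[] data = "hive and hadoop".getBytes("UTF-8");
        BytesRefWritable v = new BytesRefWritable(data, 5, 3); // the bytes "and"

        // Wrapping (data, start, length) decodes the referenced range
        // without copying it out of the backing array first.
        ByteBuffer bb = ByteBuffer.wrap(v.getData(), v.getStart(), v.getLength());
        System.out.println(decoder.decode(bb)); // prints "and"
      }
    }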

      // Data: one BytesRefWritable per column, wrapping each value's bytes.
      BytesRefArrayWritable braw = new BytesRefArrayWritable(8);
      String[] data = {"123", "456", "789", "1000", "5.3", "hive and hadoop", "1.", "NULL"};
      for (int i = 0; i < 8; i++) {
        braw.set(i, new BytesRefWritable(data[i].getBytes()));
      }
      // Test: round-trip the row through ColumnarSerDe.
      deserializeAndSerializeColumnar(serDe, braw, data);
      System.out.println("test: testColumnarSerde - OK");
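A hedged sketch of how such a ColumnarSerDe instance is typically initialized before a round-trip like the one above. The column names and types are illustrative, and the two-argument initialize() shown is the classic (pre-AbstractSerDe) signature:

    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
    import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;

    public class ColumnarSerDeSketch {
      public static void main(String[] args) throws Exception {
        Properties tbl = new Properties();
        // Standard SerDe table properties: column names and column types.
        tbl.setProperty("columns", "a,b,c");
        tbl.setProperty("columns.types", "int:double:string");

        ColumnarSerDe serDe = new ColumnarSerDe();
        serDe.initialize(new Configuration(), tbl);

        BytesRefArrayWritable row = new BytesRefArrayWritable(3);
        String[] data = {"123", "5.3", "hive and hadoop"};
        for (int i = 0; i < data.length; i++) {
          row.set(i, new BytesRefWritable(data[i].getBytes("UTF-8")));
        }

        // deserialize() returns a lazy columnar struct; columns are decoded
        // only when an ObjectInspector actually reads them.
        Object lazyRow = serDe.deserialize(row);
        System.out.println(serDe.getObjectInspector().getTypeName());
      }
    }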


      // Column fetch, lazy-decompression variant: the elided code above decides
      // whether the column bytes were eagerly decompressed (uncompData) or left
      // to a lazy decompress callback (decompCallBack).
      for (int i = 0; i < recordsNumInValBuffer; i++) {
        colAdvanceRow(selColIdx, selCol);
        int length = selCol.prvLength;

        BytesRefWritable currentCell = rest.get(i);

        if (decompressed) {
          // Reference directly into the already-decompressed bytes.
          currentCell.set(uncompData, columnNextRowStart, length);
        } else {
          // Defer decompression until the cell's bytes are actually read.
          currentCell.set(decompCallBack, columnNextRowStart, length);
        }
        columnNextRowStart = columnNextRowStart + length;
      }
      return rest;
    }

      // Materialize the selected columns of the current row. When some columns
      // are still compressed, each cell references either the decompressed
      // buffer or a lazy-decompression callback.
      if (currentValue.numCompressed > 0) {
        for (int j = 0; j < selectedColumns.length; ++j) {
          SelectedColumn col = selectedColumns[j];
          int i = col.colIndex;

          BytesRefWritable ref = ret.unCheckedGet(i);

          colAdvanceRow(j, col);

          if (currentValue.decompressedFlag[j]) {
            ref.set(currentValue.loadedColumnsValueBuffer[j].getData(),
                col.rowReadIndex, col.prvLength);
          } else {
            ref.set(currentValue.lazyDecompressCallbackObjs[j],
                col.rowReadIndex, col.prvLength);
          }
          col.rowReadIndex += col.prvLength;
        }
      } else {
        // This version of the loop eliminates a condition check and branch
        // and is measurably faster (20% or so)
        for (int j = 0; j < selectedColumns.length; ++j) {
          SelectedColumn col = selectedColumns[j];
          int i = col.colIndex;

          BytesRefWritable ref = ret.unCheckedGet(i);

          colAdvanceRow(j, col);
          ref.set(currentValue.loadedColumnsValueBuffer[j].getData(),
                col.rowReadIndex, col.prvLength);
          col.rowReadIndex += col.prvLength;
        }
      }
      rowFetched = true;

            // (elided: code using file.getAbsolutePath() in the original)

            BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnCount);

            // One reusable reference per column.
            for (int c = 0; c < columnCount; c++) {
                bytes.set(c, new BytesRefWritable());
            }

            try {

                for (int r = 0; r < recordCounts; r++) {
                    // ... read record r into 'bytes' (remainder elided)
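The elided loop body typically advances an RCFile.Reader and materializes each record. A self-contained, hedged sketch of that read path, assuming the classic RCFile.Reader API (the input path is illustrative, and getBytesCopy() is used because the references are invalidated by the next row):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.RCFile;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
    import org.apache.hadoop.io.LongWritable;

    public class RCFileReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        RCFile.Reader reader = new RCFile.Reader(fs, new Path("/tmp/sketch.rc"), conf);

        LongWritable rowID = new LongWritable();
        BytesRefArrayWritable row = new BytesRefArrayWritable();
        while (reader.next(rowID)) {
          reader.getCurrentRow(row);
          for (int i = 0; i < row.size(); i++) {
            BytesRefWritable cell = row.get(i);
            // The reference points into a reader-owned buffer that is reused on
            // the next row, so copy the bytes if they must outlive this iteration.
            System.out.println(new String(cell.getBytesCopy(), "UTF-8"));
          }
        }
        reader.close();
      }
    }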
