Package org.apache.hadoop.hive.serde2.columnar

Examples of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable
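A minimal, self-contained sketch of the write-side pattern the excerpts below all share: wrap each column's bytes in a BytesRefWritable, collect the cells in a BytesRefArrayWritable row, and append the row to an RCFile writer. The class name, file path, and column values here are illustrative assumptions, not taken from the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.RCFile;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

import java.io.IOException;

public class BytesRefWritableWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // The writer reads the column count from the configuration.
    conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, 3);
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/bytesrefwritable-example.rcfile"); // hypothetical path

    RCFile.Writer writer = new RCFile.Writer(fs, conf, file);

    // One row: each column value is a byte[] wrapped in a BytesRefWritable.
    byte[][] row = {"123".getBytes(), "hive".getBytes(), "hadoop".getBytes()};
    BytesRefArrayWritable cols = new BytesRefArrayWritable(row.length);
    for (int i = 0; i < row.length; i++) {
      cols.set(i, new BytesRefWritable(row[i], 0, row[i].length));
    }

    writer.append(cols);
    writer.close();
  }
}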


    // Set the record interval (rows buffered per row group) and create the writer.
    cloneConf.setInt(RCFile.RECORD_INTERVAL_CONF_STR, intervalRecordCount);
    RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);

    // Wrap each column's byte[] in a BytesRefWritable and pack them into one row.
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
    for (int i = 0; i < bytesArray.length; i++) {
      BytesRefWritable cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
      bytes.set(i, cu);
    }
    // Append the same row writeCount times.
    for (int i = 0; i < writeCount; i++) {
      writer.append(bytes);
    }


    RCFile.Writer writer = new RCFile.Writer(fs, cloneConf, testFile, null, codec);

    // Wrap each column's byte[] in a BytesRefWritable and pack them into one row.
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(bytesArray.length);
    for (int i = 0; i < bytesArray.length; i++) {
      BytesRefWritable cu = new BytesRefWritable(bytesArray[i], 0, bytesArray[i].length);
      bytes.set(i, cu);
    }
    // Report the writer's byte position once intervalRecordCount rows have been appended.
    for (int i = 0; i < writeCount; i++) {
      if (i == intervalRecordCount) {
        System.out.println("write position:" + writer.getLength());

    resetRandomGenerators();

    // Pre-populate the row with empty BytesRefWritable cells; they are filled
    // with random column data later.
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
    columnRandom = new byte[columnNum][];
    for (int i = 0; i < columnNum; i++) {
      BytesRefWritable cu = new BytesRefWritable();
      bytes.set(i, cu);
    }

    // A zero-length key is not allowed by the block-compress writer, so we use a
    // byte writable.

    // Pre-populate the row with empty BytesRefWritable cells; nextRandomRow()
    // fills them with random column data on every iteration.
    byte[][] columnRandom = new byte[columnNum][];
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
    for (int i = 0; i < columnNum; i++) {
      BytesRefWritable cu = new BytesRefWritable();
      bytes.set(i, cu);
    }

    for (int i = 0; i < rowCount; i++) {
      nextRandomRow(columnRandom, bytes);

        // Pre-populate the row with empty BytesRefWritable cells; nextRandomRow()
        // fills them with random column data on every iteration.
        byte[][] columnRandom = new byte[columnNum][];
        BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
        for (int i = 0; i < columnNum; i++) {
            BytesRefWritable cu = new BytesRefWritable();
            bytes.set(i, cu);
        }

        for (int i = 0; i < rowCount; i++) {
            nextRandomRow(columnRandom, bytes, columnCount);

  // Wraps each column of the record in a BytesRefWritable, packs the cells into a
  // BytesRefArrayWritable row, and appends the row to the given RCFile writer.
  private BytesRefArrayWritable writeBytesToFile(byte[][] record, RCFile.Writer writer) throws IOException {
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(record.length);
    for (int i = 0; i < record.length; i++) {
      BytesRefWritable cu = new BytesRefWritable(record[i], 0, record[i].length);
      bytes.set(i, cu);
    }
    writer.append(bytes);
    return bytes;
  }

      // Build one row of eight column values, each wrapped in a BytesRefWritable.
      BytesRefArrayWritable braw = new BytesRefArrayWritable(8);
      String[] data = {"123", "456", "789", "1000", "5.3", "hive and hadoop", "1.", "NULL"};
      for (int i = 0; i < data.length; i++) {
        braw.set(i, new BytesRefWritable(data[i].getBytes()));
      }
      // Round-trip the row through the SerDe and compare against the original data.
      deserializeAndSerializeColumnar(serDe, braw, data);
      System.out.println("test: testColumnarSerde - OK");

  public HiveIndexResult(String indexFile, JobConf conf) throws IOException,
      HiveException {
    job = conf;

    // Two reusable BytesRefWritable buffers, filled as index entries are parsed.
    bytesRef[0] = new BytesRefWritable();
    bytesRef[1] = new BytesRefWritable();
    ignoreHdfsLoc = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INDEX_IGNORE_HDFS_LOC);

    if (indexFile != null) {
      Path indexFilePath = new Path(indexFile);
      FileSystem fs = FileSystem.get(conf);

      // The key buffer holds the per-row value lengths for this column.
      fetchColumnTempBuf.reset(currentKey.allCellValLenBuffer[columnID]
          .getData(), currentKey.allCellValLenBuffer[columnID].getLength());
      for (int i = 0; i < recordsNumInValBuffer; i++) {
        int length = getColumnNextValueLength(columnID);

        // Point each row's BytesRefWritable at its slice of the column's value
        // buffer; if the buffer is still compressed, hand it a lazy-decompress
        // callback instead of the raw bytes.
        BytesRefWritable currentCell = rest.get(i);
        if (currentValue.decompressedFlag[columnID]) {
          currentCell.set(currentValue.loadedColumnsValueBuffer[columnID]
              .getData(), columnNextRowStart, length);
        } else {
          currentCell.set(currentValue.lazyDecompressCallbackObjs[columnID],
              columnNextRowStart, length);
        }
        columnNextRowStart += length;
      }
      return rest;

      // For each projected column, point the row's BytesRefWritable at the current
      // value slice: i is the actual column id, j indexes the projection-local buffers.
      for (int j = 0; j < prjColIDs.length; ++j) {
        int i = prjColIDs[j];

        BytesRefWritable ref = ret.unCheckedGet(i);

        int columnCurrentRowStart = columnRowReadIndex[i];
        int length = getColumnNextValueLength(i);
        columnRowReadIndex[i] = columnCurrentRowStart + length;

        // Decompressed columns get the raw buffer directly; compressed ones get a
        // lazy-decompress callback.
        if (currentValue.decompressedFlag[j]) {
          ref.set(currentValue.loadedColumnsValueBuffer[j].getData(),
              columnCurrentRowStart, length);
        } else {
          ref.set(currentValue.lazyDecompressCallbackObjs[j],
              columnCurrentRowStart, length);
        }
      }
      rowFetched = true;
    }
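
For completeness, a minimal read-side sketch, assuming a file written like the sketch at the top: the reader fills a BytesRefArrayWritable per row, and each cell comes back as a BytesRefWritable whose getData()/getStart()/getLength() describe a slice of the reader's column buffer (getData() may trigger lazy decompression, so it can throw IOException). The class name and path are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.RCFile;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
import org.apache.hadoop.io.LongWritable;

import java.io.IOException;
import java.util.Arrays;

public class BytesRefWritableReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path file = new Path("/tmp/bytesrefwritable-example.rcfile"); // hypothetical path

    RCFile.Reader reader = new RCFile.Reader(fs, file, conf);
    LongWritable rowID = new LongWritable();
    BytesRefArrayWritable row = new BytesRefArrayWritable();

    while (reader.next(rowID)) {
      reader.getCurrentRow(row); // cells point into the reader's column buffers
      for (int i = 0; i < row.size(); i++) {
        BytesRefWritable cell = row.get(i);
        // Copy the referenced slice; the backing buffer is reused on the next row.
        byte[] value = Arrays.copyOfRange(cell.getData(),
            cell.getStart(), cell.getStart() + cell.getLength());
        System.out.println("column " + i + ": " + new String(value));
      }
    }
    reader.close();
  }
}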
