Package org.apache.hadoop.hive.ql.io

Examples of org.apache.hadoop.hive.ql.io.RecordIdentifier$StructInfo


          new ValidTxnListImpl(jobConf.get(ValidTxnList.VALID_TXNS_KEY));

      AcidInputFormat.RawReader<V> reader =
          aif.getRawReader(jobConf, jobConf.getBoolean(IS_MAJOR, false), split.getBucket(),
              txnList, split.getBaseDir(), split.getDeltaDirs());
      RecordIdentifier identifier = reader.createKey();
      V value = reader.createValue();
      getWriter(reporter, reader.getObjectInspector(), split.getBucket());
      while (reader.next(identifier, value)) {
        writer.write(value);
        reporter.progress();
View Full Code Here


        fs.mkdirs(dir);
        partFile = AcidUtils.createBucketFile(dir, bucket);
      }
      FSDataOutputStream out = fs.create(partFile);
      for (int i = 0; i < numRecords; i++) {
        RecordIdentifier ri = new RecordIdentifier(maxTxn - 1, bucket, i);
        ri.write(out);
        out.writeBytes("mary had a little lamb its fleece was white as snow\n");
      }
      out.close();
    }
  }
View Full Code Here

      return true;
    }

    @Override
    public RecordIdentifier createKey() {
      return new RecordIdentifier();
    }
View Full Code Here

    right.setValues(1, 2, 3, 4);
    assertTrue(left.compareTo(right) < 0);
    assertTrue(right.compareTo(left) > 0);

    // ensure that we are consistent when comparing to the base class
    RecordIdentifier ri = new RecordIdentifier(1, 2, 3);
    assertEquals(1, ri.compareTo(left));
    assertEquals(-1, left.compareTo(ri));
    assertEquals(false, ri.equals(left));
    assertEquals(false, left.equals(ri));
  }
View Full Code Here

  @Test
  public void testReaderPair() throws Exception {
    ReaderKey key = new ReaderKey();
    Reader reader = createMockReader();
    RecordIdentifier minKey = new RecordIdentifier(10, 20, 30);
    RecordIdentifier maxKey = new RecordIdentifier(40, 50, 60);
    ReaderPair pair = new ReaderPair(key, reader, 20, minKey, maxKey,
        new Reader.Options());
    RecordReader recordReader = pair.recordReader;
    assertEquals(10, key.getTransactionId());
    assertEquals(20, key.getBucketId());
View Full Code Here

  @Test
  public void testOriginalReaderPair() throws Exception {
    ReaderKey key = new ReaderKey();
    Reader reader = createMockOriginalReader();
    RecordIdentifier minKey = new RecordIdentifier(0, 10, 1);
    RecordIdentifier maxKey = new RecordIdentifier(0, 10, 3);
    boolean[] includes = new boolean[]{true, true};
    ReaderPair pair = new OriginalReaderPair(key, reader, 10, minKey, maxKey,
        new Reader.Options().include(includes));
    RecordReader recordReader = pair.recordReader;
    assertEquals(0, key.getTransactionId());
View Full Code Here

        false, 10, createMaximalTxnList(),
        new Reader.Options().range(1000, 1000), null);
    RecordReader rr = merger.getCurrentReader().recordReader;
    assertEquals(0, merger.getOtherReaders().size());

    assertEquals(new RecordIdentifier(10, 20, 30), merger.getMinKey());
    assertEquals(new RecordIdentifier(40, 50, 60), merger.getMaxKey());
    RecordIdentifier id = merger.createKey();
    OrcStruct event = merger.createValue();

    assertEquals(true, merger.next(id, event));
    assertEquals(10, id.getTransactionId());
    assertEquals(20, id.getBucketId());
    assertEquals(40, id.getRowId());
    assertEquals("third", getValue(event));

    assertEquals(true, merger.next(id, event));
    assertEquals(40, id.getTransactionId());
    assertEquals(50, id.getBucketId());
    assertEquals(60, id.getRowId());
    assertEquals("fourth", getValue(event));

    assertEquals(false, merger.next(id, event));
    assertEquals(1.0, merger.getProgress(), 0.01);
    merger.close();
View Full Code Here

        OrcFile.readerOptions(conf));
    OrcRawRecordMerger merger =
        new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
            createMaximalTxnList(), new Reader.Options(),
            AcidUtils.getPaths(directory.getCurrentDirectories()));
    RecordIdentifier key = merger.createKey();
    OrcStruct value = merger.createValue();
    assertEquals(false, merger.next(key, value));
  }
View Full Code Here

        new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
            createMaximalTxnList(), new Reader.Options(),
            AcidUtils.getPaths(directory.getCurrentDirectories()));
    assertEquals(null, merger.getMinKey());
    assertEquals(null, merger.getMaxKey());
    RecordIdentifier id = merger.createKey();
    OrcStruct event = merger.createValue();

    assertEquals(true, merger.next(id, event));
    assertEquals(OrcRecordUpdater.UPDATE_OPERATION,
        OrcRecordUpdater.getOperation(event));
View Full Code Here

    }
    RecordIdentifier[] result = new RecordIdentifier[stripes.length];
    for(int i=0; i < stripes.length; ++i) {
      if (stripes[i].length() != 0) {
        String[] parts = stripes[i].split(",");
        result[i] = new RecordIdentifier();
        result[i].setValues(Long.parseLong(parts[0]),
            Integer.parseInt(parts[1]), Long.parseLong(parts[2]));
      }
    }
    return result;
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hive.ql.io.RecordIdentifier$StructInfo

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: software#gmail.com.