Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HColumnDescriptor
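
HColumnDescriptor describes a single column family: its maximum number of cell versions, compression, time-to-live, bloom filter type, block caching, and so on. The fragments below come from HBase test code and are truncated where the original listing cut them off. For orientation, here is a minimal self-contained sketch of the basic pattern, assuming a 0.96/0.98-era client API; the table name, family name, and setting values are illustrative and not taken from the fragments.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithColumnDescriptor {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Describe one column family and its storage settings.
      HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
      hcd.setMaxVersions(3);                              // keep up to 3 versions per cell
      hcd.setCompressionType(Compression.Algorithm.GZ);   // gzip-compress store files
      hcd.setTimeToLive(7 * 24 * 60 * 60);                // expire cells after 7 days (seconds)
      hcd.setBloomFilterType(BloomType.ROW);              // row-keyed bloom filter

      // Attach the family to a table descriptor and create the table.
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
      htd.addFamily(hcd);
      admin.createTable(htd);
    } finally {
      admin.close();
    }
  }
}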


// Test helper fragment (the start of the method signature is truncated in the
// original listing): builds an HTableDescriptor with one HColumnDescriptor per
// family, giving each family its own maximum version count via an older
// multi-argument HColumnDescriptor constructor.
      int [] numVersions)
  throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i],
          HColumnDescriptor.DEFAULT_COMPRESSION,
          HColumnDescriptor.DEFAULT_IN_MEMORY,
          HColumnDescriptor.DEFAULT_BLOCKCACHE,
          Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL, false);
      desc.addFamily(hcd);


// From a store test: an init overload that creates an HColumnDescriptor for the
// test family and raises its maximum versions before delegating to a fuller init.
    init(methodName, HBaseConfiguration.create());
  }

  private void init(String methodName, Configuration conf)
  throws IOException {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    // some of the tests write 4 versions and then flush
    // (with HBASE-4241, lower versions are collected on flush)
    hcd.setMaxVersions(4);
    init(methodName, conf, hcd);
  }

  /**
   * Verify that a store writer created via createWriterInTmp() honours the
   * family's compression and data block encoding settings.
   */
  public void testCreateWriter() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setCompressionType(Compression.Algorithm.GZ);
    hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
    init(getName(), conf, hcd);

    // Test createWriterInTmp()
    StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
    Path path = writer.getPath();
    writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
    writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
    writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
    writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
    writer.close();

    // Verify that compression and encoding settings are respected
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
    assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
    reader.close();
  }
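The test above sets GZ compression and DIFF data block encoding on the family, writes a store file through the store, and then checks that the resulting HFile carries both settings. Outside a test, the same two knobs are simply set on the descriptor before the table is created or altered. A small sketch follows, assuming the same API era; the family name is illustrative, and setCompactionCompressionType is included only to show that compaction output can be compressed independently.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

public class CompressedEncodedFamily {
  // Build a family descriptor with gzip compression and DIFF block encoding.
  static HColumnDescriptor gzipDiffFamily(String name) {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(name));
    hcd.setCompressionType(Compression.Algorithm.GZ);            // store file compression
    hcd.setCompactionCompressionType(Compression.Algorithm.GZ);  // compression for compaction output
    hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);            // delta-encode keys within blocks
    return hcd;
  }
}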

// From a TTL test: an injected environment edge controls the test clock, expired
// store file deletion is enabled, and the column family is given a time-to-live.
    EnvironmentEdgeManagerTestHelper.injectEdge(edge);

    Configuration conf = HBaseConfiguration.create();
    // Enable the expired store file deletion
    conf.setBoolean("hbase.store.delete.expired.storefile", true);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setTimeToLive(ttl);
    init(getName(), conf, hcd);

    long sleepTime = this.store.getScanInfo().getTtl() / storeFileNum;
    long timeStamp;
    // There are 4 store files and the max time stamp difference among these
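Here the family's time-to-live (in seconds) is set with setTimeToLive, and hbase.store.delete.expired.storefile lets the region server drop whole store files once everything in them has expired. A minimal per-family sketch with illustrative values; setMinVersions is an added assumption, shown only to illustrate how a floor of retained versions can survive past the TTL.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class TtlFamily {
  // Build a family descriptor whose cells expire after ten minutes.
  static HColumnDescriptor shortLivedFamily(String name) {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(name));
    hcd.setTimeToLive(600);   // seconds; older cells become eligible for removal
    hcd.setMinVersions(1);    // but always retain at least one version per cell
    return hcd;
  }
}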

// From a store configuration test: a compaction setting is read from the
// Configuration, then overridden by the HTableDescriptor, and finally by the
// HColumnDescriptor; the most specific level wins.
    assertFalse(store.throttleCompaction(anyValue));

    // HTD overrides XML.
    --anyValue;
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(getName() + "-htd", conf, htd, hcd);
    assertTrue(store.throttleCompaction(anyValue + 1));
    assertFalse(store.throttleCompaction(anyValue));

    // HCD overrides them both.
    --anyValue;
    hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(getName() + "-hcd", conf, htd, hcd);
    assertTrue(store.throttleCompaction(anyValue + 1));
    assertFalse(store.throttleCompaction(anyValue));
  }
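These assertions exercise the precedence of configuration overrides: a value from the Configuration (hbase-site.xml) is overridden by HTableDescriptor.setConfiguration, which is in turn overridden by HColumnDescriptor.setConfiguration. The CONFIG_KEY constant is not shown in this listing, so the sketch below uses a hypothetical property name purely to illustrate the three levels.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class ConfigurationOverrides {
  public static void main(String[] args) {
    // Hypothetical property name, used only to show the three override levels.
    final String key = "example.compaction.setting";

    Configuration conf = HBaseConfiguration.create();
    conf.set(key, "100");                      // cluster-wide default (lowest precedence)

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.setConfiguration(key, "200");          // per-table override

    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
    hcd.setConfiguration(key, "300");          // per-family override (highest precedence)
    htd.addFamily(hcd);
  }
}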

// From a coprocessor test: drop any existing table, then recreate it with a
// single family and an attached compaction coprocessor before loading rows.
      admin.disableTable(compactTable);
      admin.deleteTable(compactTable);
    }

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);

    HTable table = new HTable(util.getConfiguration(), compactTable);
    for (long i=1; i<=10; i++) {

// Test helper fragment (the start of the method signature is truncated in the
// original listing): builds an HTableDescriptor whose families each get a
// per-family maximum version count, then opens a local region over it.
    byte[]... families)
  throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
      htd.addFamily(hcd);
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    region = TEST_UTIL.createLocalHRegion(info, htd);
  }

// From a bloom filter test: the family keeps every version and uses a
// row+column (ROWCOL) bloom filter.
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("col");
    byte[] val1 = Bytes.toBytes("value1");
    // Create Table
    HColumnDescriptor hcd = new HColumnDescriptor(fam1).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  public void testAllColumnsWithBloomFilter() throws IOException {
    byte[] TABLE = Bytes.toBytes("testAllColumnsWithBloomFilter");
    byte[] FAMILY = Bytes.toBytes("family");

    // Create table
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region = TEST_UTIL.createLocalHRegion(info, htd);

  @Test
  public void testDeleteRowWithBloomFilter() throws IOException {
    byte[] familyName = Bytes.toBytes("familyName");

    // Create Table
    HColumnDescriptor hcd = new HColumnDescriptor(familyName).setMaxVersions(Integer.MAX_VALUE)
        .setBloomFilterType(BloomType.ROWCOL);

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
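The last three fragments configure the family identically: unlimited versions plus a ROWCOL bloom filter, so reads that probe specific row/qualifier pairs can skip store files that cannot contain them. A short sketch of the two common bloom filter choices, with an illustrative family name; ROW filters are smaller, while ROWCOL filters also cover the column qualifier.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomFilterFamilies {
  // Bloom filter keyed on the row only: cheap, helps row-level point gets.
  static HColumnDescriptor rowBloomFamily(String name) {
    return new HColumnDescriptor(Bytes.toBytes(name)).setBloomFilterType(BloomType.ROW);
  }

  // Bloom filter keyed on row + column qualifier: larger, helps column-level probes.
  static HColumnDescriptor rowColBloomFamily(String name) {
    return new HColumnDescriptor(Bytes.toBytes(name)).setBloomFilterType(BloomType.ROWCOL);
  }
}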
