Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HColumnDescriptor
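HColumnDescriptor describes a single column family of an HBase table: the family name plus per-family settings such as compression, maximum versions, and replication scope. Before the excerpts, here is a minimal self-contained sketch of the typical create-table pattern, assuming the classic (pre-1.0) client API used throughout this page; the table name "example" and family name "d" are placeholders:

    // Minimal sketch of defining a column family and creating a table.
    // Assumes the classic HBase client API (HBaseAdmin, Compression from
    // org.apache.hadoop.hbase.io.hfile) seen in the excerpts below.
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);

    HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("example"));
    HColumnDescriptor cf = new HColumnDescriptor(Bytes.toBytes("d"));
    cf.setMaxVersions(3);                             // keep up to 3 versions per cell
    cf.setCompressionType(Compression.Algorithm.GZ);  // gzip-compress store files
    cf.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); // opt the family into replication
    htd.addFamily(cf);

    if (!admin.tableExists(Bytes.toBytes("example"))) {
      admin.createTable(htd);
    }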


    LOG.info("HFile at " + hfilePath + " no longer fits inside a single " +
    "region. Splitting...");

    String uniqueName = getUniqueName(table.getTableName());
    HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family);
    Path botOut = new Path(tmpDir, uniqueName + ".bottom");
    Path topOut = new Path(tmpDir, uniqueName + ".top");
    splitStoreFile(getConf(), hfilePath, familyDesc, splitKey,
        botOut, topOut);


    // Builds a table descriptor by scanning a bulk-load output directory:
    // each subdirectory is a column family, and each HFile's metadata is
    // inspected so the family's compression matches the data.
    if (familyDirStatuses == null) {
      throw new FileNotFoundException("No families found in " + hfofDir);
    }

    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = null;

    // Add column families, and build a map of first/last row keys that is
    // later used to infer region split points.
    byte[][] keys = null;
    TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);

    for (FileStatus stat : familyDirStatuses) {
      if (!stat.isDir()) {
        LOG.warn("Skipping non-directory " + stat.getPath());
        continue;
      }
      Path familyDir = stat.getPath();
      // Skip _logs, etc.
      if (familyDir.getName().startsWith("_")) continue;
      byte[] family = familyDir.getName().getBytes();

      hcd = new HColumnDescriptor(family);
      htd.addFamily(hcd);

      Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
      for (Path hfile : hfiles) {
        if (hfile.getName().startsWith("_")) continue;
        HFile.Reader reader = HFile.createReader(fs, hfile,
            new CacheConfig(getConf()));
        final byte[] first, last;
        try {
          // Keep the family's compression in sync with how the HFile
          // was actually written.
          if (hcd.getCompressionType() != reader.getCompressionAlgorithm()) {
            hcd.setCompressionType(reader.getCompressionAlgorithm());
            LOG.info("Setting compression " + hcd.getCompressionType().name() +
                     " for family " + hcd.toString());
          }
          reader.loadFileInfo();
          first = reader.getFirstRowKey();
          last = reader.getLastRowKey();
          // ... (excerpt truncated here; the full source records the first/last
          // keys in `map` and closes the reader in a finally block)
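The pattern worth noting above: rather than rewriting bulk-load HFiles to match the table schema, the family's compression setting is overwritten with whatever reader.getCompressionAlgorithm() reports, keeping the descriptor consistent with the data as written.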

      // From a Thrift-style createTable handler: each Thrift ColumnDescriptor
      // is converted into an HColumnDescriptor and added to the new table.
      try {
        if (admin.tableExists(tableName)) {
          throw new AlreadyExists("table name already in use");
        }
        HTableDescriptor desc = new HTableDescriptor(tableName);
        for (ColumnDescriptor col : columnFamilies) {
          HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col);
          desc.addFamily(colDesc);
        }
        admin.createTable(desc);
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        // ... (excerpt truncated here)
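Here the HColumnDescriptor is not built by hand: ThriftUtilities.colDescFromThrift translates the Thrift ColumnDescriptor struct, which carries the family name and options over the wire, so the handler only has to add each result to the HTableDescriptor before calling admin.createTable.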

      // From a REST schema handler: an HTableDescriptor and its
      // HColumnDescriptors are populated from a table-schema model, with
      // arbitrary attributes copied over via setValue().
      HTableDescriptor htd = new HTableDescriptor(tableName);
      for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
      }
      for (ColumnSchemaModel family: model.getColumns()) {
        HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
        for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
          hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
        }
        htd.addFamily(hcd);
      }
      if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        // ... (excerpt truncated here)
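The generic setValue(key, value) used on both descriptors is what makes this loop possible: HTableDescriptor and HColumnDescriptor each keep a string-keyed attribute map, so the REST layer can round-trip arbitrary schema attributes without calling the typed setters (setMaxVersions, setCompressionType, and so on) one by one.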

    // Also from a REST schema handler: existing families are modified in
    // place, new ones are added. The table is disabled first.
    try {
      HTableDescriptor htd = admin.getTableDescriptor(tableName);
      admin.disableTable(tableName);
      try {
        for (ColumnSchemaModel family: model.getColumns()) {
          HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
          for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
          }
          if (htd.hasFamily(hcd.getName())) {
            admin.modifyColumn(tableName, hcd.getName(), hcd);
          } else {
            admin.addColumn(tableName, hcd); // use tableName, not model.getName(), for consistency
          }
        }
      } catch (IOException e) {
        // ... (excerpt truncated here)
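Note the disableTable call before the loop: in this API generation a table's schema is altered while the table is disabled, and the elided remainder of the method would be expected to re-enable it once the modifyColumn/addColumn calls complete.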

  // Test fixture: creates a table with a single USER family on first run,
  // then seeds an admin row.
  protected void setUp() throws Exception {
    super.setUp();
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists(TABLE)) {
      HTableDescriptor htd = new HTableDescriptor(TABLE);
      htd.addFamily(new HColumnDescriptor(USER));
      admin.createTable(htd);
      HTable table = new HTable(conf, TABLE);
      Put put = new Put(Bytes.toBytes(ADMIN_TOKEN));
      put.add(USER, NAME, Bytes.toBytes(ADMIN_USERNAME));
      put.add(USER, ADMIN, Bytes.toBytes(true));
      // ... (excerpt truncated here)

  // A near-identical fixture that seeds a regular user row instead.
  protected void setUp() throws Exception {
    super.setUp();
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists(TABLE)) {
      HTableDescriptor htd = new HTableDescriptor(TABLE);
      htd.addFamily(new HColumnDescriptor(USER));
      admin.createTable(htd);
      HTable table = new HTable(conf, TABLE); // pass the configuration, as in the fixture above
      Put put = new Put(Bytes.toBytes(USER_TOKEN));
      put.add(USER, NAME, Bytes.toBytes(USER_USERNAME));
      put.add(USER, TOKENS_RATE, Bytes.toBytes(RATE));
      // ... (excerpt truncated here)

    // Replication test setup: family f1 is replicated (scope GLOBAL), f2 is
    // not (scope LOCAL). The excerpt begins mid-setup; the statement that
    // referenced HConstants.HREGION_LOGDIR_NAME is elided.

    manager.addSource(slaveId);

    htd = new HTableDescriptor(test);
    HColumnDescriptor col = new HColumnDescriptor("f1");
    col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    htd.addFamily(col);
    col = new HColumnDescriptor("f2");
    col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
    htd.addFamily(col);

    hri = new HRegionInfo(htd.getName(), r1, r2);

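Replication is opted into per column family, not per table: HConstants.REPLICATION_SCOPE_GLOBAL (1) puts a family's edits into the replication stream, while REPLICATION_SCOPE_LOCAL (0, the default) keeps them out. A minimal sketch of flipping an existing family to replicated, assuming the same HBaseAdmin API (table name "t" is a placeholder; in practice you would fetch and modify the table's existing family descriptor so its other settings survive):

    // Hypothetical: enable replication on family "f1" of table "t".
    admin.disableTable("t");             // schema changes need a disabled table here
    HColumnDescriptor hcd = new HColumnDescriptor("f1");
    hcd.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    admin.modifyColumn("t", hcd);        // replace the family definition
    admin.enableTable("t");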

    // WAL test: a minimal table descriptor with one family, used while
    // rolling the write-ahead log.
    HLog hlog = new HLog(fs, logDir, oldLogDir, conf, listeners,
        URLEncoder.encode("regionserver:60020", "UTF8"));

    manager.init();
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(f1));
    // Testing normal log rolling every 20 edits
    for (long i = 1; i < 101; i++) {
      if (i > 1 && i % 20 == 0) {
        hlog.rollWriter();
      }
      // ... (excerpt truncated here)

 
  // Creates a pre-split table (5 regions between row keys 0 and 120) with
  // two families, then loads 100 rows for a bulk-delete test.
  @Test
  public void testBulkDeleteFamily() throws Throwable {
    byte[] tableName = Bytes.toBytes("testBulkDeleteFamily");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(FAMILY1));
    htd.addFamily(new HColumnDescriptor(FAMILY2));
    TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
    HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
    List<Put> puts = new ArrayList<Put>(100);
    for (int j = 0; j < 100; j++) {
      Put put = new Put(Bytes.toBytes(j));
      // ... (excerpt truncated here)
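The four-argument createTable above pre-splits the table: it creates 5 regions with boundaries evenly spaced between the serialized keys 0 and 120, so the 100 single-int-row puts that follow are spread across regions from the start instead of all landing in one.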


