Examples of FSTableDescriptors


Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    if(FSTableDescriptors.getTableInfoPath(fs, newTablePath) == null) {
      LOG.info("Creating new tableDesc for ACL");
      HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
      newDesc.setName(newTableName);
      new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
        newTablePath, newDesc, true);
    }


    ServerName fakeServer = new ServerName("nsupgrade", 96, 123);
View Full Code Here
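
A hedged, self-contained sketch of the pattern in the snippet above: write a .tableinfo file into a table directory only when none exists yet. The table name "example_table", the family "f", and the use of HBaseConfiguration.create() are illustrative assumptions, not taken from the original code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class CreateTableInfoIfMissing {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);

        // "example_table" and "f" are placeholder names for illustration.
        TableName tableName = TableName.valueOf("example_table");
        Path tableDir = FSUtils.getTableDir(rootDir, tableName);

        // Only write a descriptor if the table directory has no .tableinfo yet,
        // mirroring the getTableInfoPath() == null check in the snippet above.
        if (FSTableDescriptors.getTableInfoPath(fs, tableDir) == null) {
          HTableDescriptor htd = new HTableDescriptor(tableName);
          htd.addFamily(new HColumnDescriptor("f"));
          // forceCreation = true replaces any stale or partial descriptor file.
          new FSTableDescriptors(conf)
              .createTableDescriptorForTableDirectory(tableDir, htd, true);
        }
      }
    }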

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

      // to defaults).
      FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
      // Get fs instance used by this RS
      this.fs = new HFileSystem(this.conf, this.useHBaseChecksum);
      this.rootDir = FSUtils.getRootDir(this.conf);
      this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
      this.hlog = setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));

      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
View Full Code Here

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this, masterRecovery);

    this.tableDescriptors =
      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
      this.fileSystemManager.getRootDir());

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
View Full Code Here

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);

    this.tableDescriptors =
      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
      this.fileSystemManager.getRootDir());

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
View Full Code Here
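
The two HMaster snippets above come from different HBase versions (hence ZKClusterId versus ClusterId), but both construct FSTableDescriptors from the master's FileSystem plus root directory. A minimal sketch of reading descriptors back through an instance built the same way; the standalone class and the decision to enumerate everything with getAll() are illustrative assumptions:

    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class ListAllDescriptors {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);

        // Same construction as in the master snippets above:
        // FileSystem + root dir, defaulting to a read-write instance.
        FSTableDescriptors tableDescriptors = new FSTableDescriptors(fs, rootDir);

        // Enumerate every table descriptor found under the HBase root directory.
        Map<String, HTableDescriptor> all = tableDescriptors.getAll();
        for (Map.Entry<String, HTableDescriptor> entry : all.entrySet()) {
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
      }
    }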

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

      // to defaults).
      this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir"));
      // Get fs instance used by this RS
      this.fs = new HFileSystem(this.conf, this.useHBaseChecksum);
      this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
      this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
      this.hlog = setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      this.metrics = new RegionServerMetrics();
      this.dynamicMetrics = RegionServerDynamicMetrics.newInstance(this);
      this.rsHost = new RegionServerCoprocessorHost(this, this.conf);
View Full Code Here

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

      // Get fs instance used by this RS.  Do we use checksum verification in the hbase? If hbase
      // checksum verification enabled, then automatically switch off hdfs checksum verification.
      boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
      this.fs = new HFileSystem(this.conf, useHBaseChecksum);
      this.rootDir = FSUtils.getRootDir(this.conf);
      this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
      this.hlog = setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));

      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
View Full Code Here
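
Each of the region server snippets passes a third constructor argument of true, which marks the FSTableDescriptors instance as read-only on the filesystem: a region server only reads .tableinfo files, it never writes them. A minimal sketch of that read-only lookup path; the table name is a placeholder assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class ReadOnlyDescriptorLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);

        // Third argument (fsreadonly = true): this instance will only read
        // .tableinfo files, never create or update them.
        FSTableDescriptors tableDescriptors = new FSTableDescriptors(fs, rootDir, true);

        // "example_table" is a placeholder; the lookup may return null
        // if no .tableinfo exists for that table.
        HTableDescriptor htd = tableDescriptors.get(TableName.valueOf("example_table"));
        System.out.println(htd == null ? "no descriptor found" : htd.toString());
      }
    }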

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable));
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
    // Write the .tableinfo
    fstd.createTableDescriptor(htdEnabled);

    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(), null, null);
    createRegion(hriEnabled, rootdir, conf, htdEnabled);

    List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

    TableName disabledTable = TableName.valueOf("disabledTable");
    HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
    htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
    // Write the .tableinfo
    fstd.createTableDescriptor(htdDisabled);
    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getTableName(), null, null);
    createRegion(hriDisabled, rootdir, conf, htdDisabled);
    List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
View Full Code Here

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable));
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
    // Write the .tableinfo
    fstd.createTableDescriptor(htdEnabled);
    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(),
        null, null);
    createRegion(hriEnabled, rootdir, conf, htdEnabled);

    List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

    TableName disabledTable =
        TableName.valueOf("disabledTable");
    HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
    htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
    // Write the .tableinfo
    fstd.createTableDescriptor(htdDisabled);
    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getTableName(), null, null);
    createRegion(hriDisabled, rootdir, conf, htdDisabled);

    List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
View Full Code Here
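
Both test snippets above write a .tableinfo with createTableDescriptor before creating regions. A self-contained sketch of that write-then-read-back round trip; the table and family names are placeholder assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class TableInfoRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);
        FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);

        // Write the .tableinfo for a placeholder table, as in the tests above.
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("roundTripTable"));
        htd.addFamily(new HColumnDescriptor("f"));
        fstd.createTableDescriptor(htd);

        // Read it back through the same FSTableDescriptors instance.
        HTableDescriptor readBack = fstd.get(TableName.valueOf("roundTripTable"));
        System.out.println("Read back: " + readBack);
      }
    }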

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors

    if(FSTableDescriptors.getTableInfoPath(fs, newTablePath) == null) {
      LOG.info("Creating new tableDesc for ACL");
      HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
      newDesc.setName(newTableName);
      new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
        newTablePath, newDesc, true);
    }


    ServerName fakeServer = ServerName.valueOf("nsupgrade", 96, 123);
View Full Code Here