Package org.apache.hadoop.hbase.util

Examples of org.apache.hadoop.hbase.util.FSTableDescriptors$FileStatusFileNameComparator
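The snippets below all exercise the enclosing FSTableDescriptors class, which reads and writes the .tableinfo file kept under each table's directory in the HBase root directory. As an orientation, here is a minimal end-to-end sketch of the common pattern, assuming the 0.96-era API used throughout this page; the wrapper class, the table name "example", and the family "f" are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FSTableDescriptorsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = FSUtils.getRootDir(conf);

        // Reads and writes descriptors under the HBase root directory.
        FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);

        // Hypothetical table: name "example", one family "f".
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
        htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));

        // Persist the descriptor next to the table's data.
        fstd.createTableDescriptor(htd);

        // Read it back; descriptors are cached once read.
        HTableDescriptor loaded = fstd.get(TableName.valueOf("example"));
        System.out.println(loaded);
      }
    }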


    // Test setup: create and persist a descriptor for an offline table,
    // then create its region and register it in hbase:meta.
    HTableDescriptor offlineTable = new HTableDescriptor(
      TableName.valueOf(Bytes.toBytes("offlineTable")));
    offlineTable.addFamily(new HColumnDescriptor(Bytes.toBytes("family")));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
    fstd.createTableDescriptor(offlineTable);

    HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
    createRegion(hriOffline, rootdir, conf, offlineTable);
    MetaEditor.addRegionToMeta(master.getCatalogTracker(), hriOffline);


  // SnapshotManifest.consolidate(): v1 snapshots keep a copy of the table
  // descriptor in the snapshot directory, while v2 uses a single manifest.
  public void consolidate() throws IOException {
    if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
      Path rootDir = FSUtils.getRootDir(conf);
      LOG.info("Using old Snapshot Format");
      // write a copy of descriptor to the snapshot directory
      new FSTableDescriptors(fs, rootDir)
        .createTableDescriptorForTableDirectory(workingDir, htd, false);
    } else {
      LOG.debug("Convert to Single Snapshot Manifest");
      convertToV2SingleManifest();
    }
  }

      // Get the fs instance used by this RS. If hbase checksum verification
      // is enabled, automatically switch off hdfs checksum verification.
      boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
      this.fs = new HFileSystem(this.conf, useHBaseChecksum);
      this.rootDir = FSUtils.getRootDir(this.conf);
      this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
      this.hlog = setupWALAndReplication();
      // Init in here rather than in constructor after thread name has been set
      this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));

      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
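On the region server the descriptors are opened read-only: the third constructor argument above is, in the 0.96-era API, the fsreadonly flag, and descriptor writes stay the master's job. A sketch of the lookup side, with a hypothetical table name:

      // fsreadonly = true: lookups only, no descriptor writes from this node
      TableDescriptors tds = new FSTableDescriptors(fs, rootDir, true);
      HTableDescriptor htd = tds.get(TableName.valueOf("someTable"));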

    // Master startup: record the activation time, mount the master
    // filesystem, and load table descriptors from the root directory.
    this.masterActiveTime = System.currentTimeMillis();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    this.fileSystemManager = new MasterFileSystem(this, this, masterRecovery);

    this.tableDescriptors =
      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
      this.fileSystemManager.getRootDir());

    // publish cluster ID
    status.setStatus("Publishing Cluster ID in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());

      org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
        .migrateFSTableDescriptorsIfNecessary(fs, rd);
    }

    // Create the tableinfo for hbase:meta if it is not already there.
    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);

    return rd;
  }
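As used here, createTableDescriptor is safe to call unconditionally on every master startup: if my reading of this API holds, it declines (returning false) rather than failing when a descriptor is already on disk. A sketch:

    FSTableDescriptors fstd = new FSTableDescriptors(fs, rd);
    // true only on the very first startup; false once hbase:meta's
    // descriptor already exists (assumption: no exception is thrown)
    boolean created = fstd.createTableDescriptor(HTableDescriptor.META_TABLEDESC);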

    // Table creation: stage the descriptor in a temp directory, create the
    // regions there, and only then move the table into its final location.
    Path tempdir = fileSystemManager.getTempDir();
    FileSystem fs = fileSystemManager.getFileSystem();

    // 1. Create Table Descriptor
    Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
    new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
      tempTableDir, this.hTableDescriptor, false);
    Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);

    // 2. Create Regions
    List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);
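Unlike createTableDescriptor, createTableDescriptorForTableDirectory takes the target directory explicitly, which is what lets this handler stage the descriptor under the temp directory, and lets the snapshot code further down write copies into snapshot directories. A hedged sketch; tmpTableDir and htd are placeholders, and the final boolean is, by my reading, a forceCreation flag:

    // With forceCreation == false, an existing equivalent descriptor is
    // left in place instead of being overwritten.
    new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
        tmpTableDir, htd, false);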

    }

    // Test helper: write the table descriptor for htd, then lay out an even
    // number of regions in pairs.
    private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
        throws IOException {
      Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);

      assertTrue(nregions % 2 == 0);
      RegionData[] regions = new RegionData[nregions];
      for (int i = 0; i < regions.length; i += 2) {
        byte[] startKey = Bytes.toBytes(0 + i * 2);
 

        // Snapshot test fixture: remember the table layout and write a copy
        // of the descriptor into the snapshot working directory.
        this.rootDir = rootDir;
        this.htd = htd;
        this.desc = desc;
        this.tableRegions = tableRegions;
        this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
        new FSTableDescriptors(conf)
          .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
      }

    // Test setup: commit a store file, write a descriptor for the archived
    // table, then copy the table directory into a completed-snapshot dir.
    r1fs.commitStoreFile(TEST_FAMILY, storeFile);

    Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName);
    HTableDescriptor htd = new HTableDescriptor(tableWithRefsName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    new FSTableDescriptors(fs, rootDir)
        .createTableDescriptorForTableDirectory(tableDir, htd, false);

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
    SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs);

  // Restore the given snapshot on top of htdClone, logging the filesystem
  // state before and after for comparison.
  public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
      final HTableDescriptor htdClone) throws IOException {
    LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
    FSUtils.logFileSystemState(fs, rootDir, LOG);

    new FSTableDescriptors(conf).createTableDescriptor(htdClone);
    RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
    helper.restoreHdfsRegions();

    LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
    FSUtils.logFileSystemState(fs, rootDir, LOG);
