Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration

The snippets below appear to come from HBase's test utilities (HBaseTestingUtility and related classes) and show common ways to create, copy, and mutate a Hadoop Configuration.


    }
    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, numSlaves);
    }

    Configuration c = new Configuration(this.conf);
    this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(c, HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
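
The fragment above shows two recurring Configuration idioms: probing an int key with a sentinel default before setting it, and taking a defensive copy via the Configuration(Configuration) copy constructor. A minimal, self-contained sketch of both (the key name here is made up for illustration):

  import org.apache.hadoop.conf.Configuration;

  public class ConfigCopyDemo {
    public static void main(String[] args) {
      Configuration base = new Configuration();

      // getInt returns the supplied default (-1 here) when the key is unset,
      // so this writes a value only if nobody has configured one yet.
      if (base.getInt("example.wait.on.regionservers.maxtostart", -1) == -1) {
        base.setInt("example.wait.on.regionservers.maxtostart", 3);
      }

      // The copy constructor snapshots the current entries; mutating the
      // copy leaves the original untouched.
      Configuration copy = new Configuration(base);
      copy.setInt("example.wait.on.regionservers.maxtostart", 99);

      System.out.println(base.getInt("example.wait.on.regionservers.maxtostart", -1)); // 3
      System.out.println(copy.getInt("example.wait.on.regionservers.maxtostart", -1)); // 99
    }
  }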


   * @throws IOException
   */
  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
    // Don't leave here till we've done a successful scan of the .META.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    ResultScanner s = t.getScanner(new Scan());
    while (s.next() != null) {
      // do nothing
    }
    LOG.info("HBase has been restarted");
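
The scan in restartHBaseCluster doubles as a liveness probe: it only completes once .META. is fully readable again. A hedged sketch of the same idiom against the old HTable API these snippets use (newer HBase replaces HTable with Table/Connection), adding the scanner cleanup the fragment leaves out:

  // Assumes a populated Configuration `conf` and a running cluster.
  void waitOnMetaScan(Configuration conf) throws IOException {
    HTable meta = new HTable(new Configuration(conf), HConstants.META_TABLE_NAME);
    try {
      ResultScanner scanner = meta.getScanner(new Scan());
      try {
        // Reaching the end of the scan proves .META. is online and readable.
        while (scanner.next() != null) {
          // do nothing
        }
      } finally {
        scanner.close(); // scanners hold server-side resources; release them
      }
    } finally {
      meta.close();
    }
  }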

   * @throws IOException
   */
  public HTable createTable(byte[] tableName, byte[][] families)
  throws IOException {
    return createTable(tableName, families,
        new Configuration(getConfiguration()));
  }

      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

          .setMaxVersions(numVersions)
          .setBlocksize(blockSize);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }

          .setMaxVersions(numVersions[i]);
      desc.addFamily(hcd);
      i++;
    }
    getHBaseAdmin().createTable(desc);
    return new HTable(new Configuration(getConfiguration()), tableName);
  }
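
The preceding createTable fragments are variants of one pattern: build an HTableDescriptor, add one HColumnDescriptor per family, create the table through HBaseAdmin, and hand back an HTable bound to a private Configuration copy so per-table settings don't leak into the shared instance. A hedged reconstruction of the simplest variant (the method name and exact signature are illustrative):

  public HTable createTableSketch(byte[] tableName, byte[][] families,
      int numVersions) throws IOException {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      // setMaxVersions returns the descriptor, so calls chain as in the
      // fragments above (e.g. with setBlocksize).
      HColumnDescriptor hcd = new HColumnDescriptor(family)
          .setMaxVersions(numVersions);
      desc.addFamily(hcd);
    }
    getHBaseAdmin().createTable(desc);
    // Fresh copy: later mutations made for this table stay local to it.
    return new HTable(new Configuration(getConfiguration()), tableName);
  }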

   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows() throws IOException {
    // TODO: Redo using MetaReader class
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));

   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    // TODO: Redo using MetaReader.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
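
Both getMetaTableRows variants share a scan-and-extract idiom: iterate the ResultScanner with for-each, read one cell per row, and skip rows where the cell is absent. A hedged sketch with the cleanup the excerpts omit (what happens past the null check is cut off in the original, so the filtering below is illustrative):

  List<byte[]> collectMetaRows(Configuration conf) throws IOException {
    List<byte[]> rows = new ArrayList<byte[]>();
    HTable meta = new HTable(new Configuration(conf), HConstants.META_TABLE_NAME);
    try {
      ResultScanner scanner = meta.getScanner(new Scan());
      try {
        for (Result result : scanner) {
          byte[] val = result.getValue(HConstants.CATALOG_FAMILY,
              HConstants.REGIONINFO_QUALIFIER);
          if (val == null) {
            continue; // no region info cell on this row; skip it
          }
          rows.add(result.getRow());
        }
      } finally {
        scanner.close();
      }
    } finally {
      meta.close();
    }
    return rows;
  }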

   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    /* This is for MRv2 support */
    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
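
The tail of startMiniMapReduceCluster mirrors settings from the mini cluster's JobConf into the shared Configuration, writing each key only when the source actually has a value. That conditional-copy step in isolation, as a runnable sketch (the helper and the hard-coded values are illustrative):

  import org.apache.hadoop.conf.Configuration;

  public final class ConfMirror {
    static void copyIfSet(Configuration from, Configuration to, String key) {
      String value = from.get(key); // null when the key is unset
      if (value != null) {
        to.set(key, value);
      }
    }

    public static void main(String[] args) {
      Configuration jobConf = new Configuration();
      jobConf.set("mapred.job.tracker", "localhost:8021");

      Configuration target = new Configuration();
      copyIfSet(jobConf, target, "mapred.job.tracker");
      copyIfSet(jobConf, target, "yarn.resourcemanager.address"); // unset: no-op

      System.out.println(target.get("mapred.job.tracker"));           // localhost:8021
      System.out.println(target.get("yarn.resourcemanager.address")); // null
    }
  }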

    * @param checkStatus - true to check whether we can create an HTable with
    *                    the current configuration.
    */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the
    //  event was sent. We expect that if our watcher receives the event,
    //  other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event;
    //  just closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }
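
The heart of expireSession is a ZooKeeper trick: a client that connects with an existing session's id and password joins that session, so closing the impostor ends the session for the original client as well. The trick in isolation, hedged (quorumServers, sessionID, and password are assumed to come from a live client, as in the snippet):

  void forceExpire(String quorumServers, long sessionID, byte[] password)
      throws Exception {
    ZooKeeper impostor = new ZooKeeper(quorumServers, 500,
        new org.apache.zookeeper.Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // no-op: we only need the connection to come up
          }
        }, sessionID, password);
    impostor.close(); // closing the shared session expires it for its owner
  }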
