Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration$Resource
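Configuration reads its key/value pairs from an ordered list of resources; internally, each added resource is wrapped in the nested Configuration.Resource class that this page indexes. As a minimal, self-contained sketch of the pattern (the key and value below are hypothetical, used only for illustration):

import java.io.ByteArrayInputStream;
import org.apache.hadoop.conf.Configuration;

public class ConfigurationResourceExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);  // false: skip core-default.xml/core-site.xml
    String xml =
        "<configuration>"
      + "  <property><name>example.key</name><value>example-value</value></property>"
      + "</configuration>";
    conf.addResource(new ByteArrayInputStream(xml.getBytes()));  // in-memory resource
    System.out.println(conf.get("example.key"));  // prints "example-value"
  }
}

The snippets that follow show how real projects construct, copy, and mutate Configuration instances.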


  /**
   * Returns all rows from the .META. table for the given user table.
   *
   * @throws IOException When reading the rows fails.
   */
  public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
    // TODO: Redo using MetaReader.
    HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
    List<byte[]> rows = new ArrayList<byte[]>();
    ResultScanner s = t.getScanner(new Scan());
    for (Result result : s) {
      byte[] val = result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      if (val == null) {
        // ...
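The copy constructor used above gives the HTable its own Configuration, so per-connection changes never leak back into the shared instance. A minimal sketch of that isolation, using a hypothetical key:

import org.apache.hadoop.conf.Configuration;

public class CopyConstructorExample {
  public static void main(String[] args) {
    Configuration shared = new Configuration();
    Configuration copy = new Configuration(shared);      // independent copy of all properties
    copy.set("example.client.key", "per-client-value");  // hypothetical key
    // The original is unaffected by changes made to the copy:
    System.out.println(shared.get("example.client.key"));  // prints null
  }
}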


  /**
   * Starts a mini MapReduce cluster backed by the test filesystem.
   *
   * @throws IOException When starting the cluster fails.
   */
  public void startMiniMapReduceCluster(final int servers) throws IOException {
    LOG.info("Starting mini mapreduce cluster...");
    // These are needed for the new and improved Map/Reduce framework
    Configuration c = getConfiguration();
    String logDir = c.get("hadoop.log.dir");
    String tmpDir = c.get("hadoop.tmp.dir");
    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    /* this is for MRv2 support */
    conf.set("mapreduce.framework.name", "yarn");
    String rmAddress = mrClusterJobConf.get("yarn.resourcemanager.address");
    if (rmAddress != null) {
      conf.set("yarn.resourcemanager.address", rmAddress);
      // ...
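A sketch of how a test might drive this helper; the companion methods are assumptions about HBaseTestingUtility's API in this era and should be checked against your HBase version:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniMRClusterExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();             // HDFS + ZooKeeper + HBase
    util.startMiniMapReduceCluster(1);   // the helper shown above, one MR server
    try {
      // ... submit a MapReduce job against the mini cluster ...
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
}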

  /**
   * Expires the ZooKeeper session of the given watcher.
   *
   * @param checkStatus - true to check if we can create an HTable with the
   *                      current configuration.
   */
  public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus)
    throws Exception {
    Configuration c = new Configuration(this.conf);
    String quorumServers = ZKConfig.getZKQuorumServersString(c);
    int sessionTimeout = 500;
    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
    byte[] password = zk.getSessionPasswd();
    long sessionID = zk.getSessionId();

    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
    //  so we create a first watcher to be sure that the
    //  event was sent. We expect that if our watcher receives the event,
    //  other watchers on the same machine will get it as well.
    // When we ask to close the connection, ZK does not close it before
    //  we receive all the events, so we don't have to capture the event;
    //  just closing the connection should be enough.
    ZooKeeper monitor = new ZooKeeper(quorumServers,
      1000, new org.apache.zookeeper.Watcher(){
      @Override
      public void process(WatchedEvent watchedEvent) {
        LOG.info("Monitor ZKW received event="+watchedEvent);
      }
    } , sessionID, password);

    // Making it expire
    ZooKeeper newZK = new ZooKeeper(quorumServers,
        sessionTimeout, EmptyWatcher.instance, sessionID, password);
    newZK.close();
    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));

    // Now closing & waiting to be sure that the clients get it.
    monitor.close();

    if (checkStatus) {
      new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close();
    }
  }
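A plausible way a test could use this helper, hedged as an assumption: grab a region server's watcher and expire its session to simulate a lost ZooKeeper lease. The accessor chain below is an assumption about this HBase era's test API.

// Hypothetical usage; check the accessor chain against your HBase version.
HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();
ZooKeeperWatcher rsZK =
    util.getMiniHBaseCluster().getRegionServer(0).getZooKeeper();
util.expireSession(rsZK, true);  // true: verify META is still reachable afterwards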

  /**
   * Returns an HBaseAdmin instance backed by a copy of this utility's
   * configuration, created lazily on first call.
   *
   * @throws IOException
   */
  public synchronized HBaseAdmin getHBaseAdmin()
  throws IOException {
    if (hbaseAdmin == null){
      hbaseAdmin = new HBaseAdmin(new Configuration(getConfiguration()));
    }
    return hbaseAdmin;
  }
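Because the admin is created on first use and cached, every caller shares one instance. A short usage sketch (the table name is hypothetical):

// Hypothetical usage of the lazy accessor above.
HBaseAdmin admin = util.getHBaseAdmin();   // same instance on every call
if (!admin.tableExists("demo_table")) {    // "demo_table" is a hypothetical name
  // create the table, load data, etc.
}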

    if (this.ourClusterKey.equals(otherClusterKey)) {
      LOG.debug("Not connecting to " + peerId + " because it's us");
      return null;
    }
    // Construct the connection to the new peer
    Configuration otherConf = new Configuration(this.conf);
    try {
      ZKUtil.applyClusterKeyToConf(otherConf, otherClusterKey);
    } catch (IOException e) {
      LOG.error("Can't get peer because:", e);
      return null;
      // ...
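ZKUtil.applyClusterKeyToConf parses a cluster key of the form quorum:clientPort:znodeParent into the copied Configuration. A rough, illustrative equivalent (not the actual ZKUtil code; host names are examples):

// Illustrative only: roughly what applying a cluster key amounts to.
Configuration otherConf = new Configuration(conf);
String otherClusterKey = "zk1.example.com,zk2.example.com:2181:/hbase";
String[] parts = otherClusterKey.split(":", 3);
otherConf.set("hbase.zookeeper.quorum", parts[0]);
otherConf.set("hbase.zookeeper.property.clientPort", parts[1]);
otherConf.set("zookeeper.znode.parent", parts[2]);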

  /**
   * Tests for a race between archiving and cleaning.
   */
  @Test
  public void testCleaningRace() throws Exception {
    final long TEST_TIME = 20 * 1000;

    Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
    Path rootDir = UTIL.getDataTestDir("testCleaningRace");
    FileSystem fs = UTIL.getTestFileSystem();

    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
    Path regionDir = new Path("table", "abcdef");
    // ...

    /*
     * ... (comment truncated)
     * hbase/ManyMiniCluster.java
     */
    String hbaseDir = new File(workDir,"hbase").getAbsolutePath();
    String hbaseRoot = "file://" + hbaseDir;
    Configuration hbaseConf = HBaseConfiguration.create();

    hbaseConf.set(HConstants.HBASE_DIR, hbaseRoot);
    hbaseConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
    hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, "0.0.0.0");
    hbaseConf.setInt("hbase.master.info.port", -1);
    hbaseConf.setInt("hbase.zookeeper.property.maxClientCnxns",500);
    String zookeeperDir = new File(workDir,"zk").getAbsolutePath();
    int zookeeperPort = 2181;
    zookeeperCluster = new MiniZooKeeperCluster();
    Method m;
    Class<?> zkParam[] = {Integer.TYPE};
    try{
      m = MiniZooKeeperCluster.class.getDeclaredMethod("setDefaultClientPort",
          zkParam);
    } catch (NoSuchMethodException e) {
      m = MiniZooKeeperCluster.class.getDeclaredMethod("setClientPort",
          zkParam);
    }

    m.invoke(zookeeperCluster, new Object[] {new Integer(zookeeperPort)});
    zookeeperCluster.startup(new File(zookeeperDir));
    hbaseCluster = new MiniHBaseCluster(hbaseConf, 1);
    HMaster master = hbaseCluster.getMaster();
    Object serverName = master.getServerName();
    String hostAndPort;
    if(serverName instanceof String) {
      System.out.println("Server name is string, using HServerAddress.");
      m = HMaster.class.getDeclaredMethod("getMasterAddress",
          new Class<?>[]{});
      Class<?> clazz = Class.forName("org.apache.hadoop.hbase.HServerAddress");
      /*
       * Call method to get server address
       */
      Object serverAddr = clazz.cast(m.invoke(master, new Object[]{}));
      //returns the address as hostname:port
      hostAndPort = serverAddr.toString();
    } else {
      System.out.println("ServerName is org.apache.hadoop.hbase.ServerName," +
          "using getHostAndPort()");
      Class<?> clazz = Class.forName("org.apache.hadoop.hbase.ServerName");
      m = clazz.getDeclaredMethod("getHostAndPort", new Class<?>[] {});
      hostAndPort = m.invoke(serverName, new Object[]{}).toString();
    }

    hbaseConf.set("hbase.master", hostAndPort);
    testUtility = new HBaseTestingUtility(hbaseConf);
    testUtility.setZkCluster(zookeeperCluster);
    hbaseCluster.startMaster();
    Map<String, String> ctxMap = new HashMap<String, String>();
    ctxMap.put("table", tableName);
    // ...
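The reflection dance above exists so one test can run against HBase versions whose APIs differ: probe for the newer method, fall back to the older one. The core pattern in isolation, with hypothetical method names:

import java.lang.reflect.Method;

public class ReflectiveFallbackExample {
  static class Api {  // stand-in for a class whose API changed between versions
    void newStyle(int x) { System.out.println("new:" + x); }
  }

  public static void main(String[] args) throws Exception {
    Method m;
    try {
      m = Api.class.getDeclaredMethod("newStyle", Integer.TYPE);
    } catch (NoSuchMethodException e) {
      m = Api.class.getDeclaredMethod("oldStyle", Integer.TYPE);  // older API fallback
    }
    m.invoke(new Api(), 42);  // prints "new:42"
  }
}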

public final class OozieUtils {

    private OozieUtils() {}

    public static Properties toProperties(String properties) {
        Configuration conf = new Configuration(false);
        conf.addResource(new ByteArrayInputStream(properties.getBytes()));
        Properties jobprops = new Properties();
        for (Map.Entry<String, String> entry : conf) {
            jobprops.put(entry.getKey(), entry.getValue());
        }
        return jobprops;
    }
}
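Because the Configuration is created with loadDefaults=false and fed only the supplied XML, the resulting Properties hold exactly the document's entries, with nothing pulled in from core-default.xml. A short usage sketch (the path value is just an example):

String xml =
    "<configuration>"
  + "  <property>"
  + "    <name>oozie.wf.application.path</name>"
  + "    <value>hdfs:///apps/demo</value>"
  + "  </property>"
  + "</configuration>";
Properties props = OozieUtils.toProperties(xml);
System.out.println(props.getProperty("oozie.wf.application.path"));  // hdfs:///apps/demo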

  private void doOpen() throws IOException {
    if ((filePath == null) || (writer == null) || (formatter == null)) {
      throw new IOException("Invalid file settings");
    }

    Configuration config = new Configuration();
    // disable FileSystem JVM shutdown hook
    config.setBoolean("fs.automatic.close", false);

    // Hadoop is not thread safe when doing certain RPC operations,
    // including getFileSystem(), when running under Kerberos.
    // open() must be called by one thread at a time in the JVM.
    // NOTE: tried synchronizing on the underlying Kerberos principal previously
    // ...
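Disabling fs.automatic.close means Hadoop's JVM shutdown hook will not close the cached FileSystem, so the caller owns the close. A minimal sketch of that contract (the file path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ManualCloseExample {
  public static void main(String[] args) throws Exception {
    Configuration config = new Configuration();
    config.setBoolean("fs.automatic.close", false);  // opt out of the shutdown hook
    FileSystem fs = FileSystem.get(config);
    try {
      fs.create(new Path("/tmp/example.txt")).close();  // hypothetical path
    } finally {
      fs.close();  // our responsibility now; no hook will do it for us
    }
  }
}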

    }
    return false;
  }

  private static CompressionCodec getCodec(String codecName) {
    Configuration conf = new Configuration();
    List<Class<? extends CompressionCodec>> codecs = CompressionCodecFactory
        .getCodecClasses(conf);
    // Wish we could base this on DefaultCodec, but it appears not all codecs
    // extend DefaultCodec (e.g. Lzo).
    CompressionCodec codec = null;
    // ...
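The lookup above is cut off before the matching logic. A sketch of one way it could finish, hedged as an assumption rather than the original code: walk the discovered codec classes and match on the simple class name.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecLookupSketch {
  // Assumption: names match either "GzipCodec" exactly or the "gzip" shorthand.
  static CompressionCodec getCodec(String codecName) {
    Configuration conf = new Configuration();
    List<Class<? extends CompressionCodec>> codecs =
        CompressionCodecFactory.getCodecClasses(conf);
    for (Class<? extends CompressionCodec> cls : codecs) {
      if (cls.getSimpleName().equalsIgnoreCase(codecName)
          || cls.getSimpleName().equalsIgnoreCase(codecName + "Codec")) {
        return ReflectionUtils.newInstance(cls, conf);  // instantiate with conf
      }
    }
    return null;  // unknown codec name
  }
}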
