Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.IntegrationTestingUtility
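
The snippets below are collected from HBase (and one Phoenix) integration tests. As context, here is a minimal sketch of the usual lifecycle, assuming a standalone test class; the class name IntegrationTestSketch, the slave count, and the comments are illustrative and not taken from the snippets. setUseDistributedCluster(conf) marks the configuration so that initializeCluster(n) connects to an already-running distributed cluster instead of starting a mini cluster with n slaves, and restoreCluster() undoes whatever initializeCluster() set up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;

// Minimal lifecycle sketch; class name and slave count are illustrative.
public class IntegrationTestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ask the utility to use an already-running distributed cluster
    // rather than spinning up a local mini cluster.
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
    try {
      // Connect to the cluster (or, without the flag above, start a mini cluster with 3 slaves).
      util.initializeCluster(3);
      // ... run the actual test against util.getConfiguration() ...
    } finally {
      // Restore whatever initializeCluster() changed.
      util.restoreCluster();
    }
  }
}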



  // Lazily create the testing utility, passing along the tool's Configuration when one is set.
  private IntegrationTestingUtility getTestingUtil() {
    if (this.util == null) {
      if (getConf() == null) {
        this.util = new IntegrationTestingUtility();
      } else {
        this.util = new IntegrationTestingUtility(getConf());
      }
    }
    return util;
  }


  @BeforeClass
  public static void setUp() throws Exception {
    // Set up the integration test util
    if (util == null) {
      util = new IntegrationTestingUtility();
    }

    // Make sure there are three servers.
    util.initializeCluster(3);
    // ... (remainder of the method omitted in this excerpt)

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Mark the configuration so the utility targets an already-running distributed cluster.
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
    util.initializeCluster(1);

    ChaosMonkey monkey = new ChaosMonkey(util);
    int ret = ToolRunner.run(conf, monkey, args);
    System.exit(ret);
  }

  @BeforeClass
  public static void provisionCluster() throws Exception {
    if (null == util) {
      util = new IntegrationTestingUtility();
    }
    util.initializeCluster(1);
    if (!util.isDistributedCluster()) {
      // also need MR when running without a real cluster
      util.startMiniMapReduceCluster();
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    util = new IntegrationTestingUtility(conf);
    // not using ToolRunner to avoid unnecessary call to setConf()
    args = new GenericOptionsParser(conf, args).getRemainingArgs();
    int status = new IntegrationTestImportTsv().run(args);
    System.exit(status);
  }

      conf.set("mapreduce.job.cache.archives.visibilities",
        context.getConfiguration().get("mapreduce.job.cache.archives.visibilities", ""));

      // can't use IntegrationTest instance of util because it hasn't been
      // instantiated on the JVM running this method. Create our own.
      IntegrationTestingUtility util =
          new IntegrationTestingUtility(conf);

      // this is why we're here: launch a child job. The rest of this should
      // look a lot like TestImportTsv#testMROnTable.
      final String table = format("%s-%s-child", NAME, context.getJobID());
      final String cf = "FAM";

      String[] args = {
          "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
          table
      };

      try {
        util.createTable(table, cf);
        LOG.info("testRunFromOutputCommitter: launching child job.");
        TestImportTsv.doMROnTableTest(util, cf, null, args, 1);
      } catch (Exception e) {
        throw new IOException("Underlying MapReduce job failed. Aborting commit.", e);
      } finally {
        util.deleteTable(table);
      }
    }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
    util.initializeCluster(1);

    // Run the monkey with the policy that performs a random action every minute.
    ChaosMonkey monkey = new ChaosMonkey(util, EVERY_MINUTE_RANDOM_ACTION_POLICY);
    int ret = ToolRunner.run(conf, monkey, args);
    System.exit(ret);
  }

     * @return url to be used by clients to connect to the cluster.
     */
    private static String initClusterDistributedMode(Configuration conf) {
        setTestConfigForDistribuedCluster(conf);
        try {
            IntegrationTestingUtility util =  new IntegrationTestingUtility(conf);
            util.initializeCluster(NUM_SLAVES_BASE);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return JDBC_PROTOCOL + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    }
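
As a follow-up to the Phoenix excerpt above: the returned string is a Phoenix JDBC URL, so test clients connect to it with plain JDBC. A hedged sketch, assuming a concrete URL value (the constants above resolve to something like "jdbc:phoenix;test=true"); the table name T and the statements are purely illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PhoenixClientSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative URL; in the tests it comes from initClusterDistributedMode(conf).
    String url = "jdbc:phoenix;test=true";
    try (Connection conn = DriverManager.getConnection(url);
         Statement stmt = conn.createStatement()) {
      stmt.execute("CREATE TABLE IF NOT EXISTS T (ID INTEGER NOT NULL PRIMARY KEY, VAL VARCHAR)");
      stmt.execute("UPSERT INTO T VALUES (1, 'hello')");
      conn.commit(); // Phoenix connections do not auto-commit by default
      try (ResultSet rs = stmt.executeQuery("SELECT VAL FROM T WHERE ID = 1")) {
        while (rs.next()) {
          System.out.println(rs.getString(1));
        }
      }
    }
  }
}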
