Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration
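The snippets below, drawn from HBase and Nutch test and job code, all revolve around reading and writing typed properties on a Configuration. As a baseline, here is a minimal self-contained sketch of that core API; the property names are made up for illustration:

  import org.apache.hadoop.conf.Configuration;

  public class ConfigurationBasics {
    public static void main(String[] args) {
      // new Configuration() loads core-default.xml and core-site.xml from the classpath.
      Configuration conf = new Configuration();

      // Programmatic setters override values loaded from resource files.
      conf.set("example.name", "demo");         // illustrative keys throughout
      conf.setInt("example.retries", 3);
      conf.setBoolean("example.enabled", true);

      // Typed getters take a default that is returned when the key is unset.
      String name = conf.get("example.name", "fallback");
      int retries = conf.getInt("example.retries", 1);
      boolean enabled = conf.getBoolean("example.enabled", false);

      System.out.printf("%s retries=%d enabled=%b%n", name, retries, enabled);
    }
  }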


  // HBase mini-cluster test setup: swap in a test region server implementation
  // and tune the cluster configuration before it starts.
  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"),
    Bytes.toBytes("b"), Bytes.toBytes("c")};
  private static int countOfRegions;

  @BeforeClass public static void beforeAllTests() throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    // Class-valued property: the test implementation must extend HRegionServer.
    c.setClass(HConstants.REGION_SERVER_IMPL, TestZKBasedOpenCloseRegionRegionServer.class,
              HRegionServer.class);
    // HBase relies on HDFS append support for its write-ahead log.
    c.setBoolean("dfs.support.append", true);
    // Port 0 binds the info server to any free port, avoiding clashes between tests.
    c.setInt("hbase.regionserver.info.port", 0);
    TEST_UTIL.startMiniCluster(2);
    TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
    countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
    waitUntilAllRegionsAssigned();
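The setClass call above stores a class name under a key after checking it against the supplied interface; getClass is the matching reader. A minimal sketch with an illustrative key:

  import org.apache.hadoop.conf.Configuration;

  public class ClassValuedProps {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Store a class under an illustrative key, verified against CharSequence.
      conf.setClass("example.impl", StringBuilder.class, CharSequence.class);
      // Read it back; the default (StringBuffer) is only used when the key is unset.
      Class<? extends CharSequence> impl =
          conf.getClass("example.impl", StringBuffer.class, CharSequence.class);
      System.out.println(impl.getName()); // prints java.lang.StringBuilder
    }
  }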


      extends GoraMapper<String, WebPage, String, WebPage> {
    private FetchSchedule schedule;

    @Override
    public void setup(Context context) throws IOException {
      // Resolve the configured FetchSchedule implementation from the job configuration.
      Configuration conf = context.getConfiguration();
      schedule = FetchScheduleFactory.getFetchSchedule(conf);
      // scoreInjected = conf.getFloat("db.score.injected", 1.0f);
    }

    context.write(entry, page);
  }

  @Override
  public void setup(Context context) {
    Configuration conf = context.getConfiguration();
    filters = new URLFilters(conf);
    // Generation time: fall back to "now" if the driver did not set one.
    curTime =
      conf.getLong(GeneratorJob.GENERATOR_CUR_TIME, System.currentTimeMillis());
    normalizers =
      new URLNormalizers(conf, URLNormalizers.SCOPE_GENERATE_HOST_COUNT);
    // Flags controlling URL filtering and normalisation during generation.
    filter = conf.getBoolean(GeneratorJob.GENERATOR_FILTER, true);
    normalise = conf.getBoolean(GeneratorJob.GENERATOR_NORMALISE, true);
    schedule = FetchScheduleFactory.getFetchSchedule(conf);
    scoringFilters = new ScoringFilters(conf);
  }
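Keys like GENERATOR_CUR_TIME are read in setup() because the job driver stored them in the Configuration before submission. A hedged sketch of that driver side, using the Hadoop 2 Job API and illustrative key names:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class GeneratorDriverSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Record job parameters under well-known keys; tasks read them in setup().
      conf.setLong("generator.cur.time", System.currentTimeMillis()); // illustrative key
      conf.setBoolean("generator.filter", true);                      // illustrative key
      Job job = Job.getInstance(conf, "generate");
      // ... set mapper, reducer, and output classes, then submit the job ...
    }
  }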

  private List<ScoreDatum> inlinkedScoreData = new ArrayList<ScoreDatum>();
  private int maxLinks;

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    // Typed getters with defaults: the second argument is returned when the key is unset.
    retryMax = conf.getInt("db.fetch.retry.max", 3);
    additionsAllowed = conf.getBoolean(CRAWLDB_ADDITIONS_ALLOWED, true);
    maxInterval = conf.getInt("db.fetch.interval.max", 0);
    schedule = FetchScheduleFactory.getFetchSchedule(conf);
    scoringFilters = new ScoringFilters(conf);
    maxLinks = conf.getInt("db.update.max.inlinks", 10000);
  }

   * even when the current file gets renamed.
   */
  @Test
  public void testHDFSLinkReadDuringRename() throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    // Shrink the block size and read-prefetch window so the test crosses
    // block boundaries without writing large files.
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

  private static MiniHBaseCluster cluster = null;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // Configure which coprocessor should be loaded before the cluster starts.
    Configuration conf = util.getConfiguration();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver");

    util.startMiniCluster();
    cluster = util.getMiniHBaseCluster();
  }
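REGION_COPROCESSOR_CONF_KEY above holds a comma-separated list of class names, a shape Configuration supports directly through its list-valued accessors. A small sketch with an illustrative key and class names:

  import org.apache.hadoop.conf.Configuration;

  public class ListValuedProps {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("example.observers",                         // illustrative key
          "org.example.ObserverA,org.example.ObserverB");   // illustrative classes
      // getStrings splits the comma-separated value into an array.
      for (String name : conf.getStrings("example.observers")) {
        System.out.println(name);
      }
    }
  }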

   * and the deleted file doesn't exist anymore (FileNotFoundException).
   */
  @Test
  public void testHDFSLinkReadDuringDelete() throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    // Same small block size and prefetch window as the rename test above.
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

  @Test
  public void bulkLoadHFileTest() throws Exception {
    String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
    byte[] tableName = TEST_TABLE;
    Configuration conf = util.getConfiguration();
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});

    // Verify the region observer's bulk-load hooks fired.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName,

        + "- further testing won't prove anything.", time < ignoreExpectedTime);

    // setup the mocks
    HConnectionManager.HConnectionImplementation mockConnection = Mockito
        .mock(HConnectionManager.HConnectionImplementation.class);
    Configuration conf = HBaseConfiguration.create();
    // setup the conf to match the expected properties
    conf.setInt("hbase.client.retries.number", numRetries);
    conf.setLong("hbase.client.pause", pauseTime);
    // mock the master admin to our mock
    HMasterInterface mockMaster = Mockito.mock(HMasterInterface.class);
    Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
    Mockito.when(mockConnection.getMaster()).thenReturn(mockMaster);
    // set the max wait time for the snapshot to complete

  private static byte[] TEST_FAMILY = Bytes.toBytes("fam1");
  private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2");

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
        CPMasterObserver.class.getName());
    // Enable snapshot support.
    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
    // This test needs more than one region server.
    UTIL.startMiniCluster(2);
  }
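All of the examples above obtain their Configuration from a test utility or a task Context rather than constructing one directly. When you build one yourself, additional resource files can be layered on top of the defaults; a hedged sketch, where hbase-site.xml stands in for any classpath resource:

  import org.apache.hadoop.conf.Configuration;

  public class LayeredResources {
    public static void main(String[] args) {
      // Starts from core-default.xml and core-site.xml.
      Configuration conf = new Configuration();
      // Later resources override earlier ones for keys both define,
      // unless a property was marked final in an earlier resource.
      conf.addResource("hbase-site.xml"); // loaded from the classpath, if present
      // Programmatic set() overrides everything loaded from files.
      conf.setLong("hbase.client.pause", 500L);
      System.out.println(conf.getLong("hbase.client.pause", 1000L)); // prints 500
    }
  }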
