Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration$Resource
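Every snippet below obtains a Hadoop Configuration, either directly, through a test utility, or from a task context. For orientation, here is a minimal, self-contained sketch of the shared pattern, under the assumption that Configuration.Resource is the internal bookkeeping class that addResource(...) records each added resource in; the resource names and keys are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ConfigurationBasics {
  public static void main(String[] args) {
    // new Configuration() pre-loads the default resources
    // (core-default.xml, core-site.xml) when they are present.
    Configuration conf = new Configuration();

    // Each addResource(...) call is tracked internally as a
    // Configuration.Resource; later resources override earlier ones.
    // Both names below are hypothetical and must exist to be loaded.
    conf.addResource("my-site.xml");                      // classpath resource
    conf.addResource(new Path("/tmp/override-site.xml")); // explicit file

    // Typed getters with fall-back defaults: the idiom used throughout
    // the test and mapper setups below.
    int retryMax   = conf.getInt("db.fetch.retry.max", 3);
    boolean append = conf.getBoolean("dfs.support.append", false);
    System.out.println("retryMax=" + retryMax + ", append=" + append);
  }
}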


  @Test
  public void testGetTableArchivePath() {
    assertNotNull(HFileArchiveUtil.getTableArchivePath(new Path("root", new Path("table"))));
  }

  @Test
  public void testGetArchivePath() throws Exception {
    Configuration conf = new Configuration();
    FSUtils.setRootDir(conf, new Path("root"));
    assertNotNull(HFileArchiveUtil.getArchivePath(conf));
  }
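In testGetArchivePath above, FSUtils.setRootDir is essentially a typed setter over the configuration. A rough sketch of the idea, assuming the 0.9x-era HBase behavior where the root directory is stored under the hbase.rootdir key (HConstants.HBASE_DIR):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class RootDirSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Approximation of FSUtils.setRootDir(conf, new Path("root")):
    // the root is stored under "hbase.rootdir" so helpers like
    // HFileArchiveUtil.getArchivePath(conf) can resolve paths against it.
    conf.set("hbase.rootdir", new Path("root").toString());
    System.out.println(conf.get("hbase.rootdir")); // prints "root"
  }
}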


    assertNotNull(HFileArchiveUtil.getArchivePath(conf));
  }
 
  @Test
  public void testRegionArchiveDir() {
    Configuration conf = null;
    Path tableDir = new Path("table");
    Path regionDir = new Path("region");
    assertNotNull(HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir));
  }
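The test above passes a null Configuration on purpose: the archive-path helpers in this version of HBase appear to fall back to a built-in default archive directory name when no configuration is supplied, which is what the non-null assertion verifies. A sketch of the contrast, assuming that fallback behavior:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

public class NullConfSketch {
  public static void main(String[] args) {
    Path tableDir = new Path("table");
    Path regionDir = new Path("region");
    // Both calls are expected to yield a non-null path; a null Configuration
    // just means the helper uses its default archive directory name.
    System.out.println(HFileArchiveUtil.getRegionArchiveDir(new Configuration(), tableDir, regionDir));
    System.out.println(HFileArchiveUtil.getRegionArchiveDir(null, tableDir, regionDir));
  }
}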

  @Test
  public void testGetStoreArchivePath() {
    byte[] family = Bytes.toBytes("Family");
    Path tabledir = new Path("table");
    HRegionInfo region = new HRegionInfo(Bytes.toBytes("table"));
    Configuration conf = null;
    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));
    conf = new Configuration();
    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));

    // do a little mocking of a region to get the same results
    HRegion mockRegion = Mockito.mock(HRegion.class);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(region);
    Mockito.when(mockRegion.getTableDir()).thenReturn(tabledir);

    assertNotNull(HFileArchiveUtil.getStoreArchivePath(null, mockRegion, family));
    conf = new Configuration();
    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, mockRegion, family));
  }

  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"),
    Bytes.toBytes("b"), Bytes.toBytes("c")};
  private static int countOfRegions;

  @BeforeClass public static void beforeAllTests() throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setClass(HConstants.REGION_SERVER_IMPL, TestZKBasedOpenCloseRegionRegionServer.class,
              HRegionServer.class);
    c.setBoolean("dfs.support.append", true);
    c.setInt("hbase.regionserver.info.port", 0);
    TEST_UTIL.startMiniCluster(2);
    TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
    countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
    waitUntilAllRegionsAssigned();
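The setClass call in beforeAllTests stores a class name as an ordinary string property that is later resolved reflectively with getClass. A minimal round-trip sketch; the BaseServer/MyServer pair is hypothetical, standing in for HRegionServer and its test subclass:

import org.apache.hadoop.conf.Configuration;

public class SetClassRoundTrip {
  // Hypothetical interface/implementation pair.
  interface BaseServer {}
  static class MyServer implements BaseServer {}

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Stores the class name as a plain string property.
    conf.setClass("example.server.impl", MyServer.class, BaseServer.class);

    // Resolves it back reflectively; the third argument bounds the type.
    Class<? extends BaseServer> impl =
        conf.getClass("example.server.impl", MyServer.class, BaseServer.class);
    System.out.println(impl.getName());
  }
}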

      extends GoraMapper<String, WebPage, String, WebPage> {
    private FetchSchedule schedule;

    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();
      schedule = FetchScheduleFactory.getFetchSchedule(conf);
      // scoreInjected = conf.getFloat("db.score.injected", 1.0f);
    }

    context.write(entry, page);
  }

  @Override
  public void setup(Context context) {
    Configuration conf = context.getConfiguration();
    filters = new URLFilters(conf);
    curTime =
      conf.getLong(GeneratorJob.GENERATOR_CUR_TIME, System.currentTimeMillis());
    normalizers =
      new URLNormalizers(conf, URLNormalizers.SCOPE_GENERATE_HOST_COUNT);
    filter = conf.getBoolean(GeneratorJob.GENERATOR_FILTER, true);
    normalise = conf.getBoolean(GeneratorJob.GENERATOR_NORMALISE, true);
    schedule = FetchScheduleFactory.getFetchSchedule(conf);
    scoringFilters = new ScoringFilters(conf);
  }
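Setup methods like the two above read values that the job driver placed into the configuration before submission. A minimal sketch of that handoff; the my.custom.* keys are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class DriverSideConf {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Anything set here before submission is shipped with the job and
    // visible in every task via context.getConfiguration(), which is
    // where setup() methods like the ones above read it back.
    conf.setBoolean("my.custom.filter", true);
    conf.setLong("my.custom.curTime", System.currentTimeMillis());

    Job job = Job.getInstance(conf, "configuration-handoff-example");
    // ... configure mapper/reducer/IO here, then submit:
    // job.waitForCompletion(true);
  }
}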

  private List<ScoreDatum> inlinkedScoreData = new ArrayList<ScoreDatum>();
  private int maxLinks;

  @Override
  protected void setup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    retryMax = conf.getInt("db.fetch.retry.max", 3);
    additionsAllowed = conf.getBoolean(CRAWLDB_ADDITIONS_ALLOWED, true);
    maxInterval = conf.getInt("db.fetch.interval.max", 0);
    schedule = FetchScheduleFactory.getFetchSchedule(conf);
    scoringFilters = new ScoringFilters(conf);
    maxLinks = conf.getInt("db.update.max.inlinks", 10000);
  }

   * even when the current file gets renamed.
   */
  @Test
  public void testHDFSLinkReadDuringRename() throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());
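This test appears to exercise HBase's FileLink abstraction: a reader that tries a list of candidate locations in order, so an open stream keeps working while the underlying file is moved between them. A hedged sketch of the reading side, with illustrative paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FileLink;

public class FileLinkSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // A FileLink tries each candidate location in order, so the stream
    // stays readable if the file is renamed from the first path to the second.
    FileLink link = new FileLink(new Path("/data/current"), new Path("/data/archive"));
    try (FSDataInputStream in = link.open(fs)) {
      System.out.println(in.read());
    }
  }
}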

  private static MiniHBaseCluster cluster = null;

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // set configuration to indicate which coprocessor should be loaded
    Configuration conf = util.getConfiguration();
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver");

    util.startMiniCluster();
    cluster = util.getMiniHBaseCluster();
  }
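The coprocessor key above takes a comma-separated list of class names, so several observers can be installed at once; Configuration.setStrings builds such a list. A short sketch; the second observer class is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // setStrings joins the values with commas under one key; the region
    // coprocessor key accepts such a comma-separated list of class names.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver",
        "com.example.AnotherObserver"); // hypothetical second observer
    System.out.println(conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY));
  }
}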

   * and the deleted file no longer exists (FileNotFoundException).
   */
  @Test
  public void testHDFSLinkReadDuringDelete() throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    Configuration conf = testUtil.getConfiguration();
    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());
