Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration$Resource
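
A minimal sketch of the pattern every example below builds on: construct a Configuration (which pulls in the default resources such as core-default.xml and core-site.xml), then read and write properties. The property name here is purely illustrative.

  import org.apache.hadoop.conf.Configuration;

  public class ConfigurationBasics {
    public static void main(String[] args) {
      Configuration conf = new Configuration();         // loads the default resources
      conf.set("example.prop", "value");                // in-memory override, illustrative key
      String v = conf.get("example.prop", "fallback");  // read with a fallback default
      System.out.println("example.prop = " + v);
    }
  }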


   *
   * @param job - Job handle
   * @throws IOException - Exception if any
   */
  private void configureOutputFormat(Job job) throws IOException {
    final Configuration configuration = job.getConfiguration();
    Path targetPath = inputOptions.getTargetPath();
    FileSystem targetFS = targetPath.getFileSystem(configuration);
    targetPath = targetPath.makeQualified(targetFS.getUri(),
                                          targetFS.getWorkingDirectory());

View Full Code Here
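
A hedged sketch of the path-qualification step above: resolve the FileSystem that owns the target Path through the Configuration, then fully qualify the path against that filesystem's URI and working directory. The target path used here is only illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class QualifyTargetPath {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path target = new Path("/tmp/distcp-target");   // illustrative target path
      FileSystem targetFS = target.getFileSystem(conf);
      Path qualified = target.makeQualified(targetFS.getUri(),
                                            targetFS.getWorkingDirectory());
      System.out.println("Qualified target: " + qualified);
    }
  }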


   *
   * @return Returns the working folder information
   * @throws Exception - Exception if any
   */
  private Path createMetaFolderPath() throws Exception {
    Configuration configuration = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(
            new Cluster(configuration), configuration);
    Path metaFolderPath = new Path(stagingDir, PREFIX + String.valueOf(rand.nextInt()));
    if (LOG.isDebugEnabled()) {
      LOG.debug("Meta folder location: " + metaFolderPath);
    }
    configuration.set(DistCpConstants.CONF_LABEL_META_FOLDER, metaFolderPath.toString());
    return metaFolderPath;
  }
View Full Code Here
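
The snippet stores the meta folder location back into the Configuration so later stages can find it. A small sketch of that set-then-read-back pattern; the key and path below are stand-ins for DistCpConstants.CONF_LABEL_META_FOLDER and the real staging directory.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  public class MetaFolderConf {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      Path metaFolder = new Path("/tmp/staging/_distcp42");   // stand-in staging path
      conf.set("distcp.meta.folder", metaFolder.toString());  // stand-in for the real key
      System.out.println(conf.get("distcp.meta.folder"));
    }
  }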

   * Loads properties from distcp-default.xml into configuration
   * object
   * @return Configuration which includes properties from distcp-default.xml
   */
  private static Configuration getDefaultConf() {
    Configuration config = new Configuration();
    config.addResource(DISTCP_DEFAULT_XML);
    return config;
  }
View Full Code Here
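
addResource layers additional XML files on top of the defaults; properties from a later resource override earlier ones unless the earlier definition is marked final. A sketch under that assumption, where the site file name is illustrative.

  import org.apache.hadoop.conf.Configuration;

  public class LayeredConf {
    public static Configuration buildConf() {
      Configuration config = new Configuration();
      config.addResource("distcp-default.xml");  // defaults, loaded from the classpath
      config.addResource("distcp-site.xml");     // illustrative override file, if present
      return config;
    }
  }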

    archiveURIs = StringUtils.stringToURI(archives);
  }
 
  protected void setJobConf() throws IOException {
    msg("hadoopAliasConf_ = " + hadoopAliasConf_);
    config_ = new Configuration();
    if (!cluster_.equals("default")) {
      config_.addFinalResource(new Path(getHadoopAliasConfFile()));
    } else {
      // use only defaults: hadoop-default.xml and hadoop-site.xml
    }
View Full Code Here
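
addFinalResource(Path) belongs to an older Configuration API; later Hadoop releases expose addResource(Path) for loading a config file by path. A minimal sketch with an illustrative file location:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;

  public class AliasConf {
    public static Configuration loadAliasConf(String aliasConfFile) {
      Configuration config = new Configuration();
      config.addResource(new Path(aliasConfFile));  // e.g. "/etc/hadoop/alias-site.xml" (illustrative)
      return config;
    }
  }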

  private static final File FILE = new File(DIR, "test.seq");

  @BeforeClass
  public static void testWriteSequenceFile() throws IOException {
    FILE.delete();
    Configuration c = new Configuration();
    URI uri = FILE.toURI();
    SequenceFile.Writer writer
      = new SequenceFile.Writer(FileSystem.get(uri, c), c,
                                new Path(uri.toString()),
                                LongWritable.class, Text.class);
View Full Code Here
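
The test uses the direct Writer constructor; the same file can be written through SequenceFile.createWriter, which is the more common entry point. A hedged sketch with an illustrative file location and a single sample record:

  import java.io.File;
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.SequenceFile;
  import org.apache.hadoop.io.Text;

  public class WriteSeqFile {
    public static void main(String[] args) throws Exception {
      Configuration c = new Configuration();
      URI uri = new File("/tmp/test.seq").toURI();   // illustrative location
      FileSystem fs = FileSystem.get(uri, c);
      SequenceFile.Writer writer = SequenceFile.createWriter(
          fs, c, new Path(uri.toString()), LongWritable.class, Text.class);
      try {
        writer.append(new LongWritable(1L), new Text("hello"));  // one sample record
      } finally {
        writer.close();
      }
    }
  }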

  }

  @Override
  public void open(String filePath, CompressionCodec codec,
      CompressionType cType, FlumeFormatter fmt) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      fsOut = hdfs.append(dstPath);
    } else {
      fsOut = hdfs.create(dstPath);
    }
View Full Code Here
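
The branch above hinges on Configuration.getBoolean, which returns the supplied default when the key is unset, so append is only attempted when hdfs.append.support is explicitly enabled. A small sketch of that check on its own:

  import org.apache.hadoop.conf.Configuration;

  public class AppendFlagCheck {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      boolean appendSupported = conf.getBoolean("hdfs.append.support", false);
      System.out.println("append supported: " + appendSupported);
    }
  }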

        }
      }
    }

    ZKPermissionWatcher zkw = this.authManager.getZKPermissionWatcher();
    Configuration conf = regionEnv.getConfiguration();
    for (byte[] tableName: tableSet) {
      try {
        ListMultimap<String,TablePermission> perms =
          AccessControlLists.getTablePermissions(conf, tableName);
        byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
View Full Code Here

    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // find where hbase will root itself, so we can copy filesystem there
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    if(org.apache.hadoop.util.VersionInfo.getVersion().startsWith("2.")) {
      LOG.info("Hadoop version is 2.x, pre-migrating snapshot dir");
      FileSystem localFS = FileSystem.getLocal(conf);
      if(!localFS.rename(new Path(untar.toString(), HConstants.OLD_SNAPSHOT_DIR_NAME),
          new Path(untar.toString(), HConstants.SNAPSHOT_DIR_NAME))) {
        throw new IllegalStateException("Failed to move snapshot dir to 2.x expectation");
      }
    }
    doFsCommand(shell,
      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});
    doFsCommand(shell, new String [] {"-lsr", "/"});
    // See what's in the mini HDFS.
    Configuration toolConf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
    ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
    assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION));
    doFsCommand(shell, new String [] {"-lsr", "/"});
    TEST_UTIL.startMiniHBaseCluster(1, 1);
View Full Code Here
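
The upgrade tool above is invoked through ToolRunner.run, which applies the given Configuration to the Tool before calling it; FsShell is a Tool as well. A hedged sketch of that pattern with an illustrative shell command:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsShell;
  import org.apache.hadoop.util.ToolRunner;

  public class RunFsShell {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      int rc = ToolRunner.run(conf, new FsShell(), new String[] {"-ls", "/"});  // illustrative command
      System.exit(rc);
    }
  }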

   * Sets the HBase table.
   *
   * @param htable  The {@link HTable} to scan.
   */
  public void setHTable(HTable htable) {
    Configuration conf = htable.getConfiguration();
    logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
    logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
    this.htable = htable;
  }
View Full Code Here
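
setHTable pulls its logging knobs straight from the table's Configuration with getBoolean and getInt, falling back to defaults when the keys are unset. A sketch of the same reads against a plain Configuration; the key names below are placeholders rather than the actual HBase constants.

  import org.apache.hadoop.conf.Configuration;

  public class ScannerLoggingConf {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      boolean logScannerActivity = conf.getBoolean("log.scanner.activity", false);  // placeholder key
      int logPerRowCount = conf.getInt("log.scanner.per.row.count", 100);           // placeholder key
      System.out.println(logScannerActivity + " / " + logPerRowCount);
    }
  }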

    return new TableSnapshotRegionRecordReader();
  }

  @Override
  public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();
    String snapshotName = getSnapshotName(conf);

    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);

    Set<String> snapshotRegionNames
      = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
    if (snapshotRegionNames == null) {
      throw new IllegalArgumentException("Snapshot seems empty");
    }

    // load table descriptor
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs,
        snapshotDir);

    Scan scan = TableMapReduceUtil.convertStringToScan(conf
      .get(TableInputFormat.SCAN));
    Path tableDir = new Path(conf.get(TABLE_DIR_KEY));

    List<InputSplit> splits = new ArrayList<InputSplit>();
    for (String regionName : snapshotRegionNames) {
      // load region descriptor
      Path regionDir = new Path(snapshotDir, regionName);
View Full Code Here
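
getSplits starts by reading the HBase root directory out of the Configuration and resolving its FileSystem. A minimal sketch of just that lookup; "hbase.rootdir" is the value behind HConstants.HBASE_DIR, and the default shown is illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class HBaseRootDir {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path rootDir = new Path(conf.get("hbase.rootdir", "/hbase"));  // HConstants.HBASE_DIR; default is illustrative
      FileSystem fs = rootDir.getFileSystem(conf);
      System.out.println("HBase root: " + fs.makeQualified(rootDir));
    }
  }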
