Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration
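
All of the snippets on this page revolve around the same small Configuration API: create (or copy) a Configuration, set string or typed values, and read them back through typed getters that take a default. As a warm-up, here is a minimal self-contained sketch of that core API (the keys are real Hadoop/Flume keys, the values are placeholders):

import org.apache.hadoop.conf.Configuration;

public class ConfigurationBasics {
  public static void main(String[] args) {
    // Loads core-default.xml and core-site.xml from the classpath.
    Configuration conf = new Configuration();

    // Programmatic overrides take precedence over the loaded resources.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    conf.setInt("dfs.replication", 2);

    // Typed getters fall back to the supplied default when a key is absent.
    String fsUri = conf.get("fs.defaultFS", "file:///");
    int replication = conf.getInt("dfs.replication", 3);
    boolean append = conf.getBoolean("hdfs.append.support", false);

    System.out.println(fsUri + " replication=" + replication + " append=" + append);
  }
}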


  /**
   * @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
   */
  public static void main(String[] args) throws Exception {
    VersionInfo.logVersion();
    Configuration conf = HBaseConfiguration.create();
    @SuppressWarnings("unchecked")
    Class<? extends HRegionServer> regionServerClass = (Class<? extends HRegionServer>) conf
        .getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);

    new HRegionServerCommandLine(regionServerClass).doMain(args);
  }
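
The main() above resolves the region server implementation class from a config key with Configuration.getClass, so a deployment can swap in a subclass without code changes. A sketch of the same pattern in isolation -- the key, interface, and class names here are invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

public class PluggableImpl {
  /** Hypothetical config key, purely for illustration. */
  public static final String HANDLER_IMPL_KEY = "example.handler.impl";

  public interface Handler {
    void handle();
  }

  public static class DefaultHandler implements Handler {
    @Override
    public void handle() {
      System.out.println("default handler");
    }
  }

  public static Handler createHandler(Configuration conf) {
    // Resolve the implementation class from the key, defaulting to
    // DefaultHandler; the third argument enforces the expected interface.
    Class<? extends Handler> clazz =
        conf.getClass(HANDLER_IMPL_KEY, DefaultHandler.class, Handler.class);
    // newInstance also injects conf when the class implements Configurable.
    return ReflectionUtils.newInstance(clazz, conf);
  }
}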


  /**
   * <p>
   * A lingering reference file prevents a region from opening. It has to
   * be fixed before the cluster can start properly.
   */
  private void offlineReferenceFileRepair() throws IOException {
    Configuration conf = getConf();
    Path hbaseRoot = FSUtils.getRootDir(conf);
    FileSystem fs = hbaseRoot.getFileSystem(conf);
    Map<String, Path> allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot);
    for (Path path: allFiles.values()) {
      boolean isReference = false;
      // ...
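
Note the idiom on display here and in most of the snippets below: the FileSystem is resolved from the Path itself via Path.getFileSystem(conf) rather than FileSystem.get(conf), so a path carrying an explicit scheme (hdfs://, file://, ...) binds to the right filesystem. A small standalone sketch of that idiom (the class name ListDir is ours):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListDir {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path dir = new Path(args.length > 0 ? args[0] : "/");
    // Bind to the filesystem named by the path's scheme, not the default FS.
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(dir)) {
      System.out.println(status.getPath() + "\t" + status.getLen());
    }
  }
}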

  /**
   * @return an open .META. HRegion
   */
  private HRegion createNewRootAndMeta() throws IOException {
    Path rootdir = new Path(getConf().get(HConstants.HBASE_DIR));
    Configuration c = getConf();
    HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
    MasterFileSystem.setInfoFamilyCachingForRoot(false);
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    MasterFileSystem.setInfoFamilyCachingForMeta(false);
    HRegion root = HRegion.createHRegion(rootHRI, rootdir, c,
        // ...

  /**
   * @param args
   * @throws Exception
   */
  public static void main(String[] args) throws Exception {
    // create a fsck object
    Configuration conf = HBaseConfiguration.create();
    Path hbasedir = new Path(conf.get(HConstants.HBASE_DIR));
    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
    conf.set("fs.defaultFS", defaultFs.toString());     // for hadoop 0.21+
    conf.set("fs.default.name", defaultFs.toString())// for hadoop 0.20

    int ret = ToolRunner.run(new HBaseFsck(conf), args);
    System.exit(ret);
  }
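
HBaseFsck is launched through ToolRunner, which parses the generic Hadoop options (-D key=value, -conf file, and so on) into the Configuration before invoking run(). A minimal Tool of one's own looks roughly like this sketch (ExampleTool is an invented name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ExampleTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // getConf() returns the Configuration that ToolRunner injected,
    // already updated with any generic options from the command line.
    Configuration conf = getConf();
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    int ret = ToolRunner.run(new Configuration(), new ExampleTool(), args);
    System.exit(ret);
  }
}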

        new Context(context.getSubProperties(EventSerializer.CTX_PREFIX));
  }

  @Override
  public void open(String filePath, FlumeFormatter fmt) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      outStream = hdfs.append(dstPath);
      appending = true;
    } else {
      outStream = hdfs.create(dstPath);
      // ...

  }

  @Override
  public void open(String filePath, CompressionCodec codeC,
      CompressionType compType, FlumeFormatter fmt) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      FSDataOutputStream outStream = hdfs.append(dstPath);
      writer = SequenceFile.createWriter(conf, outStream, fmt.getKeyClass(),
          fmt.getValueClass(), compType, codeC);
    } else {
      // ...
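
Both open() overloads share the same guard: append only when the hdfs.append.support flag is set and the target file already exists, otherwise create it. A compact helper distilling that guard under the same assumptions (the class and method names are ours):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OpenForWrite {
  private OpenForWrite() {}

  // Append when the deployment advertises append support and the file
  // already exists; otherwise create it from scratch.
  public static FSDataOutputStream open(Configuration conf, Path dst)
      throws IOException {
    FileSystem fs = dst.getFileSystem(conf);
    if (conf.getBoolean("hdfs.append.support", false) && fs.isFile(dst)) {
      return fs.append(dst);
    }
    return fs.create(dst);
  }
}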

    UnsupportedOperationException {
    this.master = master;
    checkSnapshotSupport(master.getConfiguration(), master.getMasterFileSystem());

    // get the configuration for the coordinator
    Configuration conf = master.getConfiguration();
    this.wakeFrequency = conf.getInt(SNAPSHOT_WAKE_MILLIS_KEY, SNAPSHOT_WAKE_MILLIS_DEFAULT);
    long keepAliveTime = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);

    // setup the default procedure coordinator
    String name = master.getServerName().toString();
    ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, keepAliveTime, opThreads, wakeFrequency);
    ProcedureCoordinatorRpcs comms = new ZKProcedureCoordinatorRpcs(
        // ...

            } catch (IndexOutOfBoundsException ex) {
              // ignore
            }
            if (cfgSpec != null) {
              cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
              Configuration newConf = new Configuration(conf);
              Matcher m = HConstants.CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
              while (m.find()) {
                newConf.set(m.group(1), m.group(2));
              }
              configured.add(load(path, className, priority, newConf));
            } else {
              configured.add(load(path, className, priority, conf));
            }
            // ...
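
The interesting move above is new Configuration(conf): each coprocessor gets a private copy of the cluster configuration with its per-table overrides applied, leaving the shared original untouched. A small sketch of that copy-and-override behavior (example.key is a made-up key):

import org.apache.hadoop.conf.Configuration;

public class ScopedOverride {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("example.key", "global");

    // The copy constructor snapshots the current properties; writes to
    // the copy do not leak back into the original Configuration.
    Configuration scoped = new Configuration(conf);
    scoped.set("example.key", "override");

    System.out.println(conf.get("example.key"));    // prints: global
    System.out.println(scoped.get("example.key"));  // prints: override
  }
}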

    if (LOG.isDebugEnabled()) {
      LOG.debug("Files for region: " + b);
      listPaths(fs, b.getRegionDir());
    }

    Configuration conf = a.getConf();
    HTableDescriptor tabledesc = a.getTableDesc();
    HLog log = a.getLog();
    Path tableDir = a.getTableDir();
    // Presume both are of the same region type -- i.e. both user or catalog
    // table regions.  This way we can use the comparator.
    // ...

        printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
      }
      majorCompact = true;
    }
    final Path tableDir = new Path(args[0]);
    final Configuration c = HBaseConfiguration.create();
    final FileSystem fs = FileSystem.get(c);
    final Path logdir = new Path(c.get("hbase.tmp.dir"),
        "hlog" + tableDir.getName()
        + EnvironmentEdgeManager.currentTimeMillis());
    final Path oldLogDir = new Path(c.get("hbase.tmp.dir"),
        HConstants.HREGION_OLDLOGDIR_NAME);
    final HLog log = new HLog(fs, logdir, oldLogDir, c);
    try {
      processTable(fs, tableDir, log, c, majorCompact);
    } finally {
      // ...
