Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem


  /**
   * Deletes a table's directory from the file system if it exists. Used in
   * unit tests.
   */
  public static void deleteTableDescriptorIfExists(String tableName,
      Configuration conf) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
    // FSUtils.deleteDirectory works for either a file or a directory.
    if (status != null && fs.exists(status.getPath())) {
      FSUtils.deleteDirectory(fs, status.getPath());
    }
  }
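
A minimal usage sketch for a unit-test teardown. The enclosing class (FSTableDescriptors) and the JUnit wiring are assumptions; the listing does not show where this helper lives.

  @After
  public void cleanupDescriptor() throws IOException {
    // Assumes the helper lives on FSTableDescriptors; a no-op if the
    // descriptor directory was never created.
    Configuration conf = HBaseConfiguration.create();
    FSTableDescriptors.deleteTableDescriptorIfExists("testTable", conf);
  }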


  /**
   * Creates a new table descriptor file.
   * @param forceCreation True if we are to overwrite an existing descriptor file.
   * @return True if the descriptor file was successfully created.
   */
  static boolean createTableDescriptor(final HTableDescriptor htableDescriptor,
      final Configuration conf, boolean forceCreation)
  throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor,
        forceCreation);
  }
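
A hedged example of invoking this helper with forceCreation set, using the 0.94-era HTableDescriptor and HColumnDescriptor constructors; the table and family names are illustrative.

    Configuration conf = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor("exampleTable"); // illustrative name
    htd.addFamily(new HColumnDescriptor("cf"));
    // forceCreation == true: overwrite any descriptor file already on disk.
    boolean created = createTableDescriptor(htd, conf, true);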

    System.err.println("         For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
  }

  private static void split(final Configuration conf, final Path p)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString());
    }
    final Path baseDir = new Path(conf.get(HConstants.HBASE_DIR));
    final Path oldLogDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
    if (!fs.getFileStatus(p).isDir()) {
      throw new IOException(p + " is not a directory");
    }

    HLogSplitter logSplitter = HLogSplitter.createLogSplitter(
        conf, baseDir, p, oldLogDir, fs);
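
The listing cuts off immediately after the splitter is constructed; presumably the next step runs the split itself. A sketch of that continuation, assuming the 0.94-era HLogSplitter.splitLog() signature:

    // Assumed continuation: perform the split and report the files written.
    List<Path> splitFiles = logSplitter.splitLog();
    for (Path splitFile : splitFiles) {
      System.out.println("Wrote recovered edits to " + splitFile);
    }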

  /**
   * Bulk loads the given HFile into this store, first moving it onto this
   * store's filesystem if it lives on a different one.
   */
  void bulkLoadHFile(String srcPathStr) throws IOException {
    Path srcPath = new Path(srcPathStr);

    // Move the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
    // We can't compare FileSystem instances directly, as equals()
    // includes the UGI instance as part of the comparison and won't
    // work when doing SecureBulkLoad.
    // TODO deal with viewFS
    if (!srcFs.getUri().equals(desFs.getUri())) {
      LOG.info("File " + srcPath + " on different filesystem than " +
          "destination store - moving to this filesystem.");
      Path tmpPath = getTmpPath();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied to temporary path on dst filesystem: " + tmpPath);

  /**
   * Creates the on-disk regions for the table. The caller will be responsible
   * for adding the regions returned by this method to META and for doing the
   * assignment.
   */
  @Override
  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir, final String tableName)
      throws IOException {
    FileSystem fs = fileSystemManager.getFileSystem();
    Path rootDir = fileSystemManager.getRootDir();
    Path tableDir = new Path(tableRootDir, tableName);

    try {
      // 1. Execute the on-disk Clone

  public void setConf(Configuration conf) {
    super.setConf(conf);
    try {
      long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
        DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
      final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
          "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                throws IOException {
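
The refresh period is read from the configuration before the cache is built, so it can be tuned without code changes. A sketch of overriding it; the literal key string is an assumption, since the listing only shows the HFILE_CACHE_REFRESH_PERIOD_CONF_KEY constant.

    Configuration conf = HBaseConfiguration.create();
    // Key string is a placeholder for whatever HFILE_CACHE_REFRESH_PERIOD_CONF_KEY names.
    conf.setLong("hbase.master.hfilecleaner.plugins.snapshot.period", 5 * 60 * 1000L);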

  public void setConf(Configuration conf) {
    super.setConf(conf);
    try {
      long cacheRefreshPeriod = conf.getLong(
        HLOG_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_HLOG_CACHE_REFRESH_PERIOD);
      final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
          "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                throws IOException {

  /**
   * Adds the jars containing the given classes to the job's configuration,
   * such that JobClient will ship them to the cluster and add them to
   * the DistributedCache.
   */
  public static void addDependencyJars(Configuration conf,
      Class... classes) throws IOException {

    FileSystem localFs = FileSystem.getLocal(conf);

    Set<String> jars = new HashSet<String>();

    // Add jars that are already in the tmpjars variable
    jars.addAll(conf.getStringCollection("tmpjars"));

    // Add jars containing the specified classes
    for (Class clazz : classes) {
      if (clazz == null) continue;

      String pathStr = findOrCreateJar(clazz);
      if (pathStr == null) {
        LOG.warn("Could not find jar for class " + clazz +
                 " in order to ship it to the cluster.");
        continue;
      }
      Path path = new Path(pathStr);
      if (!localFs.exists(path)) {
        LOG.warn("Could not validate jar file " + path + " for class "
                 + clazz);
        continue;
      }
      jars.add(path.makeQualified(localFs).toString());
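
A typical hedged usage during MapReduce job setup; in HBase this helper is part of TableMapReduceUtil, though the enclosing class is not shown here, and MyMapper is hypothetical.

    Configuration jobConf = HBaseConfiguration.create();
    Job job = new Job(jobConf, "example-bulk-job"); // job name is illustrative
    // Ship each listed class's containing jar to the cluster via tmpjars.
    addDependencyJars(job.getConfiguration(),
        com.google.common.base.Function.class, // e.g. ship Guava
        MyMapper.class);                       // hypothetical user class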

  public static void debugLsr(Configuration conf,
      Path p, ErrorReporter errors) throws IOException {
    if (!LOG.isDebugEnabled() || p == null) {
      return;
    }
    FileSystem fs = p.getFileSystem(conf);

    if (!fs.exists(p)) {
      // nothing
      return;
    }
    errors.print(p.toString());

    if (fs.isFile(p)) {
      return;
    }

    if (fs.getFileStatus(p).isDir()) {
      FileStatus[] fss = fs.listStatus(p);
      for (FileStatus status : fss) {
        debugLsr(conf, status.getPath(), errors);
      }
    }
  }

  /**
   * Adopts an orphaned HDFS region by scanning its HFiles to determine the
   * region's key range. The resulting region will likely violate table
   * integrity but will be dealt with by merging overlapping regions.
   */
  private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
    Path p = hi.getHdfsRegionDir();
    FileSystem fs = p.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(p);
    if (dirs == null) {
      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
          p + ". This dir could probably be deleted.");
      return ;
    }

    String tableName = Bytes.toString(hi.getTableName());
    TableInfo tableInfo = tablesInfo.get(tableName);
    Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
    HTableDescriptor template = tableInfo.getHTD();

    // find min and max key values
    Pair<byte[],byte[]> orphanRegionRange = null;
    for (FileStatus cf : dirs) {
      String cfName = cf.getPath().getName();
      // TODO Figure out what the special dirs are
      if (cfName.startsWith(".") || cfName.equals("splitlog")) continue;

      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());
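
The loop is truncated just after the CacheConfig is created; presumably each HFile is then opened to read its first and last row keys. A standalone sketch, assuming the 0.94-era HFile.Reader accessors:

          // Sketch: read this hfile's key range to grow the orphan region's bounds.
          HFile.Reader reader = HFile.createReader(fs, hfile.getPath(), cacheConf);
          try {
            reader.loadFileInfo();           // must be called before key lookups
            start = reader.getFirstRowKey(); // smallest row key in the file
            end = reader.getLastRowKey();    // largest row key in the file
          } finally {
            reader.close();
          }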
