Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.Trash$Emptier

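The snippets on this page come from several projects and Hadoop versions, but they all follow the same pattern: build a Trash from a Configuration (or from a FileSystem plus a Configuration), call moveToTrash(Path) for a recoverable delete, and run the Runnable returned by getEmptier() on a daemon thread so expired trash checkpoints get purged. A minimal sketch of that lifecycle, with an illustrative path and interval rather than values taken from any of the examples below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("fs.trash.interval", 60);         // minutes; 0 disables the trash

    FileSystem fs = FileSystem.get(conf);
    Trash trash = new Trash(fs, conf);

    // Recoverable delete: moveToTrash returns false if the trash is disabled or the move fails.
    Path victim = new Path("/tmp/obsolete-data");  // illustrative path
    if (fs.exists(victim) && !trash.moveToTrash(victim)) {
      fs.delete(victim, true);                     // fall back to a permanent delete
    }

    // The Emptier rolls the Current trash directory into checkpoints and purges expired ones.
    Thread emptier = new Thread(trash.getEmptier(), "Trash Emptier");
    emptier.setDaemon(true);
    emptier.start();
  }
}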

      // Path or FileSystem. So need to achieve this by creating a dummy conf.
      // this needs to be filtered out based on version
      Configuration dupConf = new Configuration(conf);
      FileSystem.setDefaultUri(dupConf, fs.getUri());

      Trash trashTmp = new Trash(dupConf);
      if (trashTmp.moveToTrash(f)) {
        LOG.info("Moved to trash: " + f);
        return true;
      }
      if (fs.delete(f, true)) {
        LOG.info("Deleted the diretory " + f);
View Full Code Here


  private void startTrashEmptier(Configuration conf) throws IOException {
    long trashInterval = conf.getLong("fs.trash.interval", 0);
    if (trashInterval == 0) {
      return;
    }
    this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
    this.emptier.setDaemon(true);
    this.emptier.start();
  }
View Full Code Here
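fs.trash.interval is read in minutes, and a value of 0 disables the trash entirely, which is why the method above returns without starting a thread. Because the emptier runs as a daemon it never blocks JVM shutdown; a matching tear-down method (hypothetical here, named to pair with startTrashEmptier) only needs to interrupt it:

  private void stopTrashEmptier() {
    if (this.emptier != null) {
      this.emptier.interrupt();   // Emptier.run() breaks out of its sleep loop when interrupted
      this.emptier = null;
    }
  }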

      //compare sorted lsr list and sorted dst list
      final Text lsrpath = new Text();
      final Text dstpath = new Text();
      final Text dstfrom = new Text();
      final Trash trash = new Trash(dstfs, conf);
      Path lastpath = null;

      boolean hasnext = dstin.next(dstpath, dstfrom);
      while (lsrin.next(lsrpath, NullWritable.get())) {
        int dst_cmp_lsr = dstpath.compareTo(lsrpath);
        while (hasnext && dst_cmp_lsr < 0) {
          hasnext = dstin.next(dstpath, dstfrom);
          dst_cmp_lsr = dstpath.compareTo(lsrpath);
        }
       
        if (dst_cmp_lsr == 0) {
          //lsrpath exists in dst, skip it
          hasnext = dstin.next(dstpath, dstfrom);
        } else {
          //lsrpath does not exist, delete it
          final Path rmpath = new Path(dstroot.getPath(), lsrpath.toString());
          ++deletedPathsCount;
          if ((lastpath == null || !isAncestorPath(lastpath, rmpath))) {
            if (!(trash.moveToTrash(rmpath) || dstfs.delete(rmpath, true))) {
              throw new IOException("Failed to delete " + rmpath);
            }
            lastpath = rmpath;
          }
        }
View Full Code Here
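This fragment is the delete pass of a distcp-style copy with deletion enabled: the sorted destination listing (dstin) is merged against the sorted source listing (lsrin), and every destination path with no source counterpart is moved to the trash, with a permanent delete only as a fallback when the trash move fails. The isAncestorPath check skips paths that sit under a directory that was already removed. A plausible sketch of that helper, inferred from how it is used above rather than copied from the project:

  // Sketch: true if 'child' equals 'ancestor' or lies somewhere beneath it.
  private static boolean isAncestorPath(Path ancestor, Path child) {
    String a = ancestor.toUri().getPath();
    String c = child.toUri().getPath();
    return c.equals(a)
        || c.startsWith(a.endsWith(Path.SEPARATOR) ? a : a + Path.SEPARATOR);
  }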

        if (status.isDir() && !recursive) {
          LOG.severe("To remove directory, please use fs rm --recursive instead");
          return;
        }
        if (!skipTrash) {
            Trash trash = new Trash(fs, getHadoopConfiguration());
            trash.moveToTrash(p);
        }
        fs.delete(p, recursive);
      }
    } catch (Throwable t) {
      LOG.severe("run HDFS shell failed. Message is: " + t.getMessage());
View Full Code Here

        if (status.isDirectory() && !recursive) {
          LOG.severe("To remove directory, please use 'fs rm </path/to/dir> --recursive' instead");
          return;
        }
        if (!skipTrash) {
          Trash trash = new Trash(fs, getHadoopConfiguration());
          trash.moveToTrash(p);
        }
        fs.delete(p, recursive);
      }
    }
    catch (Exception t) {
View Full Code Here
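The two shell-command snippets above (an older isDir() variant and a newer isDirectory() one) mirror hadoop fs -rm: unless the user passed --skipTrash, the path is first moved to the trash, and the hard delete that follows is effectively a no-op because the path no longer exists at its original location. A variant that makes the fallback explicit, deleting permanently only when the trash is skipped or the move fails, could look like this (sketch only; skipTrash, recursive, fs, p and getHadoopConfiguration() are the names used in the snippets):

        boolean removed = false;
        if (!skipTrash) {
          Trash trash = new Trash(fs, getHadoopConfiguration());
          removed = trash.moveToTrash(p);   // false when the trash is disabled
        }
        if (!removed) {
          removed = fs.delete(p, recursive);
        }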

        if (fs.exists(new Path(conf.get(MRConfig.TEMP_DIR))))
            fs.delete(new Path(conf.get(MRConfig.TEMP_DIR)), true);
        if(fs.exists(new Path("./tmp")))
          fs.delete(new Path("./tmp"), true);
       
        (new Trash(conf)).expunge(); // empty trash
       
       
        // always use the compound file format to speed things up
        conf.setBoolean(SenseiJobConfig.USE_COMPOUND_FILE, true);
       
View Full Code Here
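Here a job driver clears its temporary directories and then empties the trash outright. Trash.expunge() only deletes checkpoints that have outlived fs.trash.interval; the hadoop fs -expunge command pairs it with Trash.checkpoint(), which rolls the Current directory into a new dated checkpoint so it is removed on a later pass. A sketch of that pairing, assuming the same conf as in the snippet:

        Trash trash = new Trash(conf);
        trash.expunge();      // drop checkpoints older than fs.trash.interval
        trash.checkpoint();   // roll Current into a new timestamp-named checkpoint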

  }
 

  public static void moveToTrash(Configuration conf, Path path) throws IOException
  {
    Trash t = new Trash(conf);
    boolean isMoved = t.moveToTrash(path);
    t.expunge();
    if (!isMoved)
    {
      logger.error("Trash is not enabled or file is already in the trash.");
    }
  }
View Full Code Here
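A caller only needs a Configuration and a Path. Note that moveToTrash() returns false both when the trash is disabled and when the file already sits inside a trash directory, which is exactly what the error message above reports. Hypothetical usage of the helper:

    Configuration conf = new Configuration();
    Path staleOutput = new Path("/data/output/2014-01-01");  // illustrative path
    moveToTrash(conf, staleOutput);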

    // Path or FileSystem. So need to achieve this by creating a dummy conf.
    // this needs to be filtered out based on version

    Configuration dupConf = new Configuration(conf);
    FileSystem.setDefaultUri(dupConf, fs.getUri());
    Trash trash = new Trash(dupConf);
    return trash.moveToTrash(path);
  }
View Full Code Here
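In older Hadoop releases the only Trash constructor took a bare Configuration and resolved the trash location against the default FileSystem, so code working with an arbitrary fs had to copy the conf and point the default URI (fs.default.name, later fs.defaultFS) at that FileSystem, which is exactly what the dummy dupConf above does. Later releases make the detour unnecessary; a sketch of the two alternatives, guarded as an assumption about the caller's Hadoop version:

    // With the two-argument constructor, bind the Trash to fs directly:
    Trash trash = new Trash(fs, conf);
    return trash.moveToTrash(path);

    // Or, on Hadoop 0.23+/2.x, use the static helper that also picks the right trash root:
    // return Trash.moveToAppropriateTrash(fs, path, conf);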

    conf.set("fs.trash.interval", "0.2"); // 12 seconds
    conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
    conf.setClass("fs.trash.classname", TrashPolicyPattern.class, TrashPolicy.class);
    conf.set("fs.trash.base.paths", TEST_DIR + "/my_root/*/");
    conf.set("fs.trash.unmatched.paths", TEST_DIR + "/unmatched/");
    Trash trash = new Trash(conf);
    // clean up trash can
    fs.delete(new Path(TEST_DIR + "/my_root/*/"), true);
    fs.delete(new Path(TEST_DIR + "/my_root_not/*/"), true);


    FsShell shell = new FsShell();
    shell.setConf(conf);
    shell.init();
    // First create a new directory with mkdirs
    deleteAndCheckTrash(fs, shell, "my_root/sub_dir1/sub_dir1_1/myFile",
        "my_root/sub_dir1/.Trash/Current/" + TEST_DIR
            + "/my_root/sub_dir1/sub_dir1_1");
    deleteAndCheckTrash(fs, shell, "my_root/sub_dir2/sub_dir2_1/myFile",
        "my_root/sub_dir2/.Trash/Current/" + TEST_DIR
            + "/my_root/sub_dir2/sub_dir2_1");
    deleteAndCheckTrash(fs, shell, "my_root_not/", "unmatched/.Trash/Current"
        + TEST_DIR + "/my_root_not");
    deleteAndCheckTrash(fs, shell, "my_root/file", "unmatched/.Trash/Current"
        + TEST_DIR + "/my_root/file");

    Path currentTrash = new Path(TEST_DIR, "my_root/sub_dir1/.Trash/Current/");
    fs.mkdirs(currentTrash);
    cmdUsingShell("-rmr", shell, currentTrash);
    TestCase.assertTrue(!fs.exists(currentTrash));

    cmdUsingShell("-rmr", shell, new Path(TEST_DIR, "my_root"));
    TestCase.assertTrue(fs.exists(new Path(TEST_DIR,
        "unmatched/.Trash/Current/" + TEST_DIR + "/my_root")));
   
    // Test Emptier
    // Start Emptier in background
    Runnable emptier = trash.getEmptier();
    Thread emptierThread = new Thread(emptier);
    emptierThread.start();

    int fileIndex = 0;
    Set<String> checkpoints = new HashSet<String>();
View Full Code Here
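This test installs a pattern-based TrashPolicy so that each directory matching my_root/*/ keeps its own .Trash, with non-matching paths falling back to the shared unmatched/ trash root, and then runs the Emptier on a background thread. On each pass the Emptier renames Current to a timestamp-named checkpoint and deletes checkpoints older than fs.trash.interval, so the rest of the test can poll a trash root and record the checkpoints as they appear. A sketch of such a poll, with an illustrative trash root (FileStatus is org.apache.hadoop.fs.FileStatus):

    // Record the checkpoint names the Emptier creates under the shared trash root.
    Path trashRoot = new Path(TEST_DIR, "unmatched/.Trash");
    for (FileStatus status : fs.listStatus(trashRoot)) {
      String name = status.getPath().getName();
      if (!name.equals("Current")) {
        checkpoints.add(name);   // timestamp-named checkpoint directories
      }
    }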
