Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem.deleteOnExit()
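Each snippet below is a partial excerpt from a larger source file. As a starting point, here is a minimal, self-contained sketch of the deleteOnExit() contract (class and path names are invented for illustration): the call only registers the path, and the path is actually removed when the FileSystem is closed, either explicitly or by Hadoop's JVM shutdown hook.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // assumption for the sketch: operate on the local file system
    FileSystem fs = FileSystem.get(URI.create("file:///"), conf);

    Path scratch = new Path("target/deleteOnExit-demo/scratch.txt");
    fs.create(scratch).close();            // create an empty placeholder file

    // register the path; nothing is deleted at this point
    fs.deleteOnExit(scratch);
    System.out.println("exists before close: " + fs.exists(scratch));

    fs.close();                            // registered paths are deleted here
  }
}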


              pathSrc.getName());

          // LOG.info("WorkingDirectory: " + hdfs.getWorkingDirectory());
          LOG.debug("copyToHDFSFile: " + pathDst);
          hdfs.copyFromLocalFile(pathSrc, pathDst);
          // clean up the copied file when the FileSystem is closed
          hdfs.deleteOnExit(pathDst);

          finalPath = pathDst.makeQualified(hdfs).toString();
          finalArr[i] = finalPath;
        }
      } catch (IOException e) {
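The excerpt above begins mid-statement. The overall pattern is to copy a local file into HDFS, register the copy for cleanup, and record its fully qualified path; a compact sketch of that pattern follows (the helper name and parameters are assumptions, not the original code):

  static String stageToHdfs(FileSystem hdfs, Path localSrc, Path hdfsDir) throws IOException {
    Path dst = new Path(hdfsDir, localSrc.getName());
    hdfs.copyFromLocalFile(localSrc, dst);
    // the staged copy is removed when hdfs.close() runs (typically at JVM shutdown)
    hdfs.deleteOnExit(dst);
    return hdfs.makeQualified(dst).toString();
  }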


  private ImportTestFilesAndData prepareTestFiles() throws Throwable {
    Configuration defaultConf = new Configuration();
    Path tempFile = new Path("target/accumulo-test/import/sample.map");
    Path failures = new Path("target/accumulo-test/failures/");
    FileSystem fs = FileSystem.get(new URI("file:///"), defaultConf);
    // register both test paths for cleanup when the FileSystem is closed,
    // then clear any leftovers from a previous run and recreate the directories
    fs.deleteOnExit(tempFile);
    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().openWriter(


    // cleanup the test root directory
    fileSystem.delete(testRootDir, true);
    // set the current working directory
    fileSystem.setWorkingDirectory(testRootDir);
   
    // remove the test root again when the FileSystem is closed
    fileSystem.deleteOnExit(testRootDir);
   
    // create a mini cluster using the local file system
    MiniMRCluster mrCluster =
      new MiniMRCluster(1, fileSystem.getUri().toString(), 1);
   

    // define output directory
    Path outputDir =
      new Path(rootTempDir, "testResourceUsageMetricsWithHadoopLogsAnalyzer");
    lfs.delete(outputDir, true);
    lfs.deleteOnExit(outputDir);
   
    // run HadoopLogsAnalyzer
    HadoopLogsAnalyzer analyzer = new HadoopLogsAnalyzer();
    analyzer.setConf(conf);
    Path traceOutput = new Path(outputDir, "trace.json");

      stm3.close();

      // set delete on exit flag on files.
      fs.deleteOnExit(file1);
      fs.deleteOnExit(file2);
      localfs.deleteOnExit(file3);

      // close the file system. This should make the above files
      // disappear.
      fs.close();
      localfs.close();

   * delete(URI.create(xxx)); xxx must not include illegal characters.
   *
   */
  public void delete(String file) throws FileNotFoundException, IOException {
    FileSystem fs = getfs();
    // registered here; the actual delete happens when the FileSystem is closed below
    fs.deleteOnExit(new Path(file));
    fs.close();
  }
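Note that this helper relies on the fs.close() call to make the deletion happen; deleteOnExit() by itself removes nothing until the FileSystem is closed. Since FileSystem.get() normally returns a shared, cached instance, closing it just to trigger the cleanup can affect other callers; when the file should disappear immediately, a direct delete is the usual alternative. A sketch, reusing the getfs() helper from the excerpt above (the method name is invented):

  public void deleteNow(String file) throws IOException {
    FileSystem fs = getfs();
    fs.delete(new Path(file), false);   // non-recursive delete, takes effect immediately
  }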

  /**
   * rename
 


    // create the partitions file
    FileSystem fs = FileSystem.get(job.getConfiguration());
    Path partitionsPath = new Path("/tmp", "partitions_" + UUID.randomUUID());
    // makeQualified returns a new, fully qualified Path; it does not modify its argument
    partitionsPath = fs.makeQualified(partitionsPath);
    // remove the partitions file when the FileSystem is closed at the end of the job
    fs.deleteOnExit(partitionsPath);
    writePartitions(job.getConfiguration(), partitionsPath, splitPoints);

    // configure job to use it
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), partitionsPath);

      db.setName(TEST_DB1_NAME);
      dbLocation =
          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_file_";
      FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
      // create a plain file at the intended database location and register it for cleanup
      fs.createNewFile(new Path(dbLocation));
      fs.deleteOnExit(new Path(dbLocation));
      db.setLocationUri(dbLocation);

      boolean createFailed = false;
      try {
        client.createDatabase(db);
