Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileStatus
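
The snippets below share one pattern: ask a FileSystem for a FileStatus and query it. A minimal sketch of that pattern, assuming a default FileSystem and a placeholder path (neither is taken from any snippet on this page):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/example");           // placeholder path

    FileStatus status = fs.getFileStatus(p);     // throws FileNotFoundException if p does not exist
    System.out.println("length:   " + status.getLen());
    System.out.println("isDir:    " + status.isDir()); // isDirectory() in newer Hadoop releases
    System.out.println("modified: " + status.getModificationTime());
    System.out.println("owner:    " + status.getOwner());
    System.out.println("perms:    " + status.getPermission());
  }
}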


    return delete(path, true);
  }

  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    FileStatus status;
    try {
      status = getFileStatus(f);
    } catch (FileNotFoundException e) {
      return false;
    }
    Path absolutePath = makeAbsolute(f);
    String key = pathToKey(absolutePath);
    if (status.isDir()) {
      FileStatus[] contents = listStatus(f);
      if (!recursive && contents.length > 0) {
        throw new IOException("Directory " + f.toString() + " is not empty.");
      }
      for (FileStatus p : contents) {
View Full Code Here


   
    return status.toArray(new FileStatus[0]);
  }
 
  private FileStatus newFile(FileMetadata meta, Path path) {
    return new FileStatus(meta.getLength(), false, 1, getDefaultBlockSize(),
        meta.getLastModified(), path.makeQualified(this));
  }
View Full Code Here

 
  private FileStatus newDirectory(Path path) {
    return new FileStatus(0, true, 1, 0, 0, path.makeQualified(this));
  }
View Full Code Here
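
The newFile and newDirectory helpers above build FileStatus objects directly through the six-argument constructor (length, isdir, block replication, block size, modification time, qualified path). A minimal sketch of that constructor, with illustrative placeholder values:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class FileStatusCtorSketch {
  public static void main(String[] args) {
    // Arguments: length, isdir, block_replication, blocksize, modification_time, path.
    // The sizes, timestamps and paths here are placeholders.
    FileStatus file = new FileStatus(1024L, false, 1, 64L * 1024 * 1024,
        System.currentTimeMillis(), new Path("/tmp/example-file"));
    FileStatus dir = new FileStatus(0L, true, 1, 0L, 0L, new Path("/tmp/example-dir"));
    System.out.println(file.isDir() + " " + dir.isDir());
  }
}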

    return result;
  }
 
  private boolean mkdir(Path f) throws IOException {
    try {
      FileStatus fileStatus = getFileStatus(f);
      if (!fileStatus.isDir()) {
        throw new IOException(String.format(
            "Can't make directory for path %s since it is a file.", f));

      }
    } catch (FileNotFoundException e) {
View Full Code Here

    try {
      localCache[1] = manager.getLocalCache(secondCacheFile.toUri(), conf,
          TaskTracker.getPrivateDistributedCacheDir(userName),
          fs.getFileStatus(secondCacheFile), false,
          System.currentTimeMillis(), new Path(TEST_ROOT_DIR), false, false);
      FileStatus stat = localfs.getFileStatus(myFile);
      assertTrue(stat.getPermission().equals(myPermission));
      // validate permissions of localized files.
      checkFilePermissions(localCache);
    } finally {
      localfs.delete(myFile, false);
    }
View Full Code Here
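
The test above reads back the permission of a localized file through FileStatus.getPermission(). A minimal sketch of that check against the local file system; the path and the expected permission are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionCheckSketch {
  public static void main(String[] args) throws Exception {
    FileSystem localFs = FileSystem.getLocal(new Configuration());
    Path p = new Path("/tmp/example-file");                 // placeholder path
    FsPermission expected = new FsPermission((short) 0644); // placeholder permission

    FileStatus stat = localFs.getFileStatus(p);
    if (!stat.getPermission().equals(expected)) {
      throw new IllegalStateException("unexpected permission: " + stat.getPermission());
    }
  }
}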

  }
 
  public static void check(FileSystem fs, Path p, long length) throws IOException {
    int i = -1;
    try {
      final FileStatus status = fs.getFileStatus(p);
      TestCase.assertEquals(length, status.getLen());
      InputStream in = fs.open(p);
      for(i++; i < length; i++) {
        TestCase.assertEquals((byte)i, (byte)in.read());
      }
      i = -(int)length;
View Full Code Here

    }

    Preconditions.checkArgument(count == 1,
        "Exactly one of testSplitSize, testSplitPct, testRandomSelectionSize, testRandomSelectionPct should be set");

    FileStatus trainingOutputDirStatus = fs.getFileStatus(trainingOutputDirectory);
    Preconditions.checkArgument(trainingOutputDirStatus != null && trainingOutputDirStatus.isDir(),
                                "%s is not a directory", trainingOutputDirectory);
    FileStatus testOutputDirStatus = fs.getFileStatus(testOutputDirectory);
    Preconditions.checkArgument(testOutputDirStatus != null && testOutputDirStatus.isDir(),
                                "%s is not a directory", testOutputDirectory);
  }
View Full Code Here

   */
  public static FileInputStream openForRead(File f, String expectedOwner,
      String expectedGroup) throws IOException {
    if (skipSecurity) {
      // Subject to race conditions but this is the best we can do
      FileStatus status =
        rawFilesystem.getFileStatus(new Path(f.getAbsolutePath()));
      checkStat(f, status.getOwner(), status.getGroup(),
          expectedOwner, expectedGroup);
      return new FileInputStream(f);
    }

    FileInputStream fis = new FileInputStream(f);
View Full Code Here

   */
  static String obtainLogDirOwner(TaskAttemptID taskid) throws IOException {
    Configuration conf = new Configuration();
    FileSystem raw = FileSystem.getLocal(conf).getRaw();
    Path jobLogDir = new Path(getJobDir(taskid.getJobID()).getAbsolutePath());
    FileStatus jobStat = raw.getFileStatus(jobLogDir);
    return jobStat.getOwner();
  }
View Full Code Here

     * @return a list of HDFS data blocks
     * @throws IOException Thrown if there is an error while communicating
     * with the HDFS Namenode
     */
    private static Set<LocatedBlock> getBlocks(String path, Configuration conf, DFSClient dfs, Set<LocatedBlock> blocks) throws IOException {
        FileStatus s = FileSystem.get(conf).getFileStatus(new Path(path));
       
        if (!s.isDir()) {
            blocks.addAll(dfs.namenode.getBlockLocations(path, 0,
                    Long.MAX_VALUE).getLocatedBlocks());
        } else {
            DirectoryListing drl = dfs.listPaths(path, HdfsFileStatus.EMPTY_NAME);
            for (HdfsFileStatus fileStatus : drl.getPartialListing()) {
View Full Code Here
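
The fragment above walks a directory listing and recurses into subdirectories. A sketch of that traversal pattern using only the public FileSystem/FileStatus API (the DFSClient-specific block calls are left out); the root path in main is a placeholder:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecursiveListSketch {

  // Collect the FileStatus of every plain file under status, descending into directories.
  static void collectFiles(FileSystem fs, FileStatus status, List<FileStatus> out)
      throws IOException {
    if (!status.isDir()) {
      out.add(status);
      return;
    }
    for (FileStatus child : fs.listStatus(status.getPath())) {
      collectFiles(fs, child, out);
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    List<FileStatus> files = new ArrayList<FileStatus>();
    collectFiles(fs, fs.getFileStatus(new Path("/tmp")), files); // placeholder root path
    System.out.println(files.size() + " files found");
  }
}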
