Code examples demonstrating usage of org.apache.hadoop.fs.FileStatus

  /**
   * compute HDFS block distribution, for reference file, it is an estimate
   */
  private void computeHDFSBlockDistribution() throws IOException {
    if (isReference()) {
      FileStatus status;
      if (this.link != null) {
        status = this.link.getFileStatus(fs);
      } else {
        status = fs.getFileStatus(this.referencePath);
      }
      this.hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(
        this.fs, this.reference, status);
    } else {
      FileStatus status;
      if (isLink()) {
        status = link.getFileStatus(fs);
      } else {
        status = this.fs.getFileStatus(path);
      }
      long length = status.getLen();
      this.hdfsBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(
        this.fs, status, 0, length);
    }
  }
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

        return false;
      }

      try {
        // Verify if the input file exists
        FileStatus inputStat = getFileStatus(inputFs, inputPath);
        if (inputStat == null) return false;

        // Verify if the output file exists and is the same that we want to copy
        FileStatus outputStat = getFileStatus(outputFs, outputPath);
        if (outputStat != null && sameFile(inputStat, outputStat)) {
          LOG.info("Skip copy " + inputPath + " to " + outputPath + ", same file.");
          return true;
        }
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

    /**
     * Preserve the files attribute selected by the user copying them from the source file
     */
    private boolean preserveAttributes(final Path path, final FileStatus refStat) {
      FileStatus stat;
      try {
        stat = outputFs.getFileStatus(path);
      } catch (IOException e) {
        LOG.warn("Unable to get the status for file=" + path);
        return false;
      }

      try {
        if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
          outputFs.setPermission(path, new FsPermission(filesMode));
        } else if (!stat.getPermission().equals(refStat.getPermission())) {
          outputFs.setPermission(path, refStat.getPermission());
        }
      } catch (IOException e) {
        LOG.error("Unable to set the permission for file=" + path, e);
        return false;
      }

      try {
        String user = (filesUser != null) ? filesUser : refStat.getOwner();
        String group = (filesGroup != null) ? filesGroup : refStat.getGroup();
        if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) {
          outputFs.setOwner(path, user, group);
        }
      } catch (IOException e) {
        LOG.error("Unable to set the owner/group for file=" + path, e);
        return false;
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

    return this.cache.contains(fileName);
  }

  private synchronized void refreshCache() throws IOException {
    // get the status of the snapshots directory
    FileStatus status;
    try {
      status = fs.getFileStatus(snapshotDir);
    } catch (FileNotFoundException e) {
      LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist");
      return;
    }
    // if the snapshot directory wasn't modified since we last check, we are done
    if (status.getModificationTime() <= lastModifiedTime) return;

    // directory was modified, so we need to reload our cache
    // there could be a slight race here where we miss the cache, check the directory modification
    // time, then someone updates the directory, causing us to not scan the directory again.
    // However, snapshot directories are only created once, so this isn't an issue.

    // 1. update the modified time
    this.lastModifiedTime = status.getModificationTime();

    // 2.clear the cache
    this.cache.clear();
    Map<String, SnapshotDirectoryInfo> known = new HashMap<String, SnapshotDirectoryInfo>();
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

   * @return True if file was zero-length (and if so, we'll delete it in here).
   * @throws IOException
   */
  private static boolean isZeroLengthThenDelete(final FileSystem fs, final Path p)
      throws IOException {
    FileStatus stat = fs.getFileStatus(p);
    if (stat.getLen() > 0) return false;
    LOG.warn("File " + p + " is zero-length, deleting.");
    fs.delete(p, false);
    return true;
  }
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

        if (ioe.getCause() instanceof EOFException) {

          boolean considerDumping = false;
          if (this.queueRecovered) {
            try {
              FileStatus stat = this.fs.getFileStatus(this.currentPath);
              if (stat.getLen() == 0) {
                LOG.warn(peerClusterZnode + " Got EOF and the file was empty");
              }
              considerDumping = true;
            } catch (IOException e) {
              LOG.warn(peerClusterZnode + " Got while getting file size: ", e);
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

      setupTable(table);
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

      Path hbaseTableDir = new Path(conf.get(HConstants.HBASE_DIR) + "/" + table );
      fs = hbaseTableDir.getFileSystem(conf);
      FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      tableinfo = status.getPath();
      fs.rename(tableinfo, new Path("/.tableinfo"));

      //to report error if .tableinfo is missing.
      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

      // fix OrphanTable with default .tableinfo (htd not yet cached on master)
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = null;
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);

      HTableDescriptor htd = admin.getTableDescriptor(table.getBytes());
      htd.setValue("NOT_DEFAULT", "true");
      admin.disableTable(table);
      admin.modifyTable(table.getBytes(), htd);
      admin.enableTable(table);
      fs.delete(status.getPath(), true);

      // fix OrphanTable with cache
      htd = admin.getTableDescriptor(table.getBytes()); // warms up cached htd on master
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

   * @return True if file was zero-length (and if so, we'll delete it in here).
   * @throws IOException
   */
  private static boolean isZeroLengthThenDelete(final FileSystem fs, final Path p)
      throws IOException {
    FileStatus stat = fs.getFileStatus(p);
    if (stat.getLen() > 0) return false;
    LOG.warn("File " + p + " is zero-length, deleting.");
    fs.delete(p, false);
    return true;
  }
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

        if (ioe.getCause() instanceof EOFException) {

          boolean considerDumping = false;
          if (this.queueRecovered) {
            try {
              FileStatus stat = this.fs.getFileStatus(this.currentPath);
              if (stat.getLen() == 0) {
                LOG.warn(peerClusterZnode + " Got EOF and the file was empty");
              }
              considerDumping = true;
            } catch (IOException e) {
              LOG.warn(peerClusterZnode + " Got while getting file size: ", e);
View Full Code Here

Examples of org.apache.hadoop.fs.FileStatus

    String jobId = t.getJobId();
    String jobFile = t.getJobFile();
    // Get sizes of JobFile and JarFile
    // sizes are -1 if they are not present.
    FileSystem fileSystem = FileSystem.get(fConf);
    FileStatus status[] = fileSystem.listStatus(new Path(jobFile).getParent());
    long jarFileSize = -1;
    long jobFileSize = -1;
    for(FileStatus stat : status) {
      if (stat.getPath().toString().contains("job.xml")) {
        jobFileSize = stat.getLen();
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.