Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileStatus
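
org.apache.hadoop.fs.FileStatus describes one file or directory in a Hadoop FileSystem: its path, length, type, replication, block size, owner, permissions, and modification time. The snippets below, collected from Hadoop and HBase, show the two usual ways to obtain one: FileSystem.getFileStatus(Path) for a single path, and FileSystem.listStatus(Path) for a directory listing. As a minimal, self-contained sketch of both calls (the path here is a placeholder, and isDir() matches the era of these snippets; newer Hadoop prefers isDirectory()):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp");  // placeholder; any readable directory works
    FileSystem fs = dir.getFileSystem(conf);

    // Metadata for a single path.
    FileStatus stat = fs.getFileStatus(dir);
    System.out.println(stat.getPath() + " len=" + stat.getLen()
        + " dir=" + stat.isDir() + " mtime=" + stat.getModificationTime());

    // One FileStatus per entry in a directory listing.
    for (FileStatus child : fs.listStatus(dir)) {
      System.out.println(child.getPath().getName() + " " + child.getLen());
    }
  }
}

The first snippet, a helper from HBase's write-ahead-log handling, uses getLen() to detect and remove an empty file: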


  /**
   * Check whether the file at the given path is zero-length and, if so,
   * delete it.
   * @return True if file was zero-length (and if so, we'll delete it in here).
   * @throws IOException if the length check or the delete fails
   */
  private static boolean isZeroLengthThenDelete(final FileSystem fs, final Path p)
      throws IOException {
    FileStatus stat = fs.getFileStatus(p);
    if (stat.getLen() > 0) return false;
    LOG.warn("File " + p + " is zero-length, deleting.");
    fs.delete(p, false);
    return true;
  }
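
The next fragment, apparently from HBase's replication source, calls getFileStatus() inside an EOF handler to tell a genuinely empty log file apart from a read error; the surrounding method is truncated here: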


        if (ioe.getCause() instanceof EOFException) {

          boolean considerDumping = false;
          if (this.queueRecovered) {
            try {
              FileStatus stat = this.fs.getFileStatus(this.currentPath);
              if (stat.getLen() == 0) {
                LOG.warn(peerClusterZnode + " Got EOF and the file was empty");
              }
              considerDumping = true;
            } catch (IOException e) {
              LOG.warn(peerClusterZnode + " Got exception while getting file size: ", e);
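
This HBase test (an hbck test, judging by the doFsck/assertErrors helpers) uses FSTableDescriptors.getTableInfoPath() to find a table's .tableinfo descriptor, moves it aside to provoke an error, and then verifies that hbck both reports and repairs the missing file: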

      setupTable(table);
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

      Path hbaseTableDir = new Path(conf.get(HConstants.HBASE_DIR) + "/" + table);
      fs = hbaseTableDir.getFileSystem(conf);
      FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      tableinfo = status.getPath();
      fs.rename(tableinfo, new Path("/.tableinfo"));

      // hbck should report an error because .tableinfo is missing.
      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

      // fix OrphanTable with default .tableinfo (htd not yet cached on master)
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);

      HTableDescriptor htd = admin.getTableDescriptor(table.getBytes());
      htd.setValue("NOT_DEFAULT", "true");
      admin.disableTable(table);
      admin.modifyTable(table.getBytes(), htd);
      admin.enableTable(table);
      fs.delete(status.getPath(), true);

      // fix OrphanTable with cache
      htd = admin.getTableDescriptor(table.getBytes()); // warms up cached htd on master
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
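
In what appears to be the TaskTracker's job-localization code, listStatus() on the job directory supplies the sizes of job.xml and the job jar (the loop is truncated before the jar-file branch):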

    String jobId = t.getJobId();
    String jobFile = t.getJobFile();
    // Get the sizes of the job file and the jar file; each is -1 if absent.
    FileSystem fileSystem = FileSystem.get(fConf);
    FileStatus[] status = fileSystem.listStatus(new Path(jobFile).getParent());
    long jarFileSize = -1;
    long jobFileSize = -1;
    for(FileStatus stat : status) {
      if (stat.getPath().toString().contains("job.xml")) {
        jobFileSize = stat.getLen();
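
This fragment, which opens mid-expression, guards HBase's old-HLog handling: getFileStatus() distinguishes a zero-length log file, which gets deleted, from a non-empty one that already exists: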

                        HRegionInfo.encodeRegionName(key)),
                        HREGION_OLDLOGFILE_NAME);
                    Path oldlogfile = null;
                    SequenceFile.Reader old = null;
                    if (fs.exists(logfile)) {
                      FileStatus stat = fs.getFileStatus(logfile);
                      if (stat.getLen() <= 0) {
                        LOG.warn("Old hlog file " + logfile + " is zero " +
                          "length. Deleting existing file");
                        fs.delete(logfile, false);
                      } else {
                        LOG.warn("Old hlog file " + logfile + " already " +
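
In what looks like DistCp's split computation, getFileStatus() on the source file list yields its length, which the loop then consumes while carving the list into roughly equal splits: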

                                   ") total_size(" + cbsize + ") listuri(" +
                                   srcfilelist + ")");
      }
      Path src = new Path(srcfilelist);
      FileSystem fs = src.getFileSystem(job);
      FileStatus srcst = fs.getFileStatus(src);

      ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
      LongWritable key = new LongWritable();
      FilePair value = new FilePair();
      final long targetsize = cbsize / numSplits;
      long pos = 0L;
      long last = 0L;
      long acc = 0L;
      long cbrem = srcst.getLen();
      for (SequenceFile.Reader sl = new SequenceFile.Reader(fs, src, job);
           sl.next(key, value); last = sl.getPosition()) {
        // If adding this file would push the current split past the target
        // size, close out the current split and start this file in the next one.
        if (acc + key.get() > targetsize && acc != 0) {
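
After a copy, DistCp compares the destination's getLen() against the source's to catch short copies, deleting the bad destination file and throwing on a mismatch: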

        if (!destFileSys.mkdirs(absdst.getParent())) {
          throw new IOException("Failed to create parent dir: " + absdst.getParent());
        }
        rename(destFileSys, tmpfile, absdst);

        FileStatus dststat = destFileSys.getFileStatus(absdst);
        if (dststat.getLen() != srcstat.getLen()) {
          destFileSys.delete(absdst, false);  // absdst is a file, so non-recursive delete
          throw new IOException("File size not matched: copied "
              + bytesString(dststat.getLen()) + " to dst (=" + absdst
              + ") but expected " + bytesString(srcstat.getLen())
              + " from " + srcstat.getPath());       
        }
      }
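
Finally, the copy mapper (DistCp again, by the FilePair type) receives a FileStatus directly inside its input value, so map() has the source file's metadata without another round trip to the namenode: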

    /**
     * Copy one file (described by the FilePair value) to its destination path,
     * counting any failure toward failcount.
     */
    public void map(LongWritable key,
                    FilePair value,
                    OutputCollector<WritableComparable, Text> out,
                    Reporter reporter) throws IOException {
      FileStatus srcstat = value.input;
      Path dstpath = value.output;
      try {
        copy(srcstat, dstpath, out, reporter);
      } catch (IOException e) {
        ++failcount;
