Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream$Buffer
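All of the snippets below follow the same basic pattern: obtain an FSDataInputStream from FileSystem.open(Path), read from it, and close it. A minimal self-contained sketch of that pattern (the path, class name, and buffer size here are illustrative placeholders, not taken from the snippets):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataInputStreamExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();   // default configuration
    Path path = new Path("/tmp/example.dat");   // placeholder path
    FileSystem fs = path.getFileSystem(conf);
    // FSDataInputStream is Closeable, so try-with-resources closes it for us
    try (FSDataInputStream in = fs.open(path)) {
      byte[] buf = new byte[4096];
      int n = in.read(buf);                     // read up to 4096 bytes
      System.out.println("Read " + n + " bytes from " + path);
    }
  }
}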


    private boolean copyFile(final Context context, final Path inputPath, final Path outputPath)
        throws IOException {
      FSDataInputStream in = openSourceFile(inputPath);
      if (in == null) {
        context.getCounter(Counter.MISSING_FILES).increment(1);
        return false;
      }

      try {
        // Check that the input file exists
        FileStatus inputStat = getFileStatus(inputFs, inputPath);
        if (inputStat == null) return false;

        // Check whether the output file already exists and matches the file we want to copy
        FileStatus outputStat = getFileStatus(outputFs, outputPath);
        if (outputStat != null && sameFile(inputStat, outputStat)) {
          LOG.info("Skip copy " + inputPath + " to " + outputPath + ", same file.");
          return true;
        }

        context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());

        // Ensure that the output folder is there and copy the file
        outputFs.mkdirs(outputPath.getParent());
        FSDataOutputStream out = outputFs.create(outputPath, true);
        try {
          if (!copyData(context, inputPath, in, outputPath, out, inputStat.getLen()))
            return false;
        } finally {
          out.close();
        }

        // Preserve attributes
        return preserveAttributes(outputPath, inputStat);
      } finally {
        in.close();
      }
    }
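The copyData helper invoked above is not shown in this excerpt. A hedged sketch of what such a helper might do, with a simplified signature (the real helper in the surrounding class also takes the task context and paths, presumably to report progress and counters):

    // Sketch only: stream bytes in 64 KB chunks and succeed when the copied
    // total matches the expected length. Buffer size and the trimmed-down
    // signature are assumptions, not the original code.
    private boolean copyData(final FSDataInputStream in, final FSDataOutputStream out,
        final long expectedSize) throws IOException {
      byte[] buffer = new byte[64 * 1024];
      long copied = 0;
      for (int n; (n = in.read(buffer)) > 0; ) {
        out.write(buffer, 0, n);
        copied += n;
      }
      return copied == expectedSize;
    }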


  /**
   * Read the SnapshotDescription stored in the given snapshot directory.
   */
  public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
      throws CorruptedSnapshotException {
    Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
    try {
      FSDataInputStream in = null;
      try {
        in = fs.open(snapshotInfo);
        return SnapshotDescription.parseFrom(in);
      } finally {
        if (in != null) in.close();
      }
    } catch (IOException e) {
      throw new CorruptedSnapshotException("Couldn't read snapshot info from:" + snapshotInfo, e);
    }
  }
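Since FSDataInputStream is Closeable, the nested try/finally above can be collapsed with try-with-resources on Java 7+; an equivalent sketch:

  public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir)
      throws CorruptedSnapshotException {
    Path snapshotInfo = new Path(snapshotDir, SNAPSHOTINFO_FILE);
    // try-with-resources closes the stream whether parseFrom succeeds or throws
    try (FSDataInputStream in = fs.open(snapshotInfo)) {
      return SnapshotDescription.parseFrom(in);
    } catch (IOException e) {
      throw new CorruptedSnapshotException("Couldn't read snapshot info from: " + snapshotInfo, e);
    }
  }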

    Path regioninfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
    FileSystem fs = regioninfo.getFileSystem(getConf());

    FSDataInputStream in = fs.open(regioninfo);
    HRegionInfo hri = new HRegionInfo();
    try {
      hri.readFields(in);
    } finally {
      in.close(); // close the stream even if deserialization fails
    }
    LOG.debug("HRegionInfo read: " + hri.toString());
    hbi.hdfsEntry.hri = hri;
  }

    Path regionInfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
    // make sure the file exists
    if (!fs.exists(regionInfo)) {
      throw new CorruptedSnapshotException("No region info found for region:" + region, snapshot);
    }
    FSDataInputStream in = fs.open(regionInfo);
    HRegionInfo found = new HRegionInfo();
    try {
      found.readFields(in);
      if (!region.equals(found)) {
        throw new CorruptedSnapshotException("Found region info (" + found
           + ") doesn't match expected region:" + region, snapshot);
      }
    } finally {
      in.close();
    }

    // make sure we have the expected recovered edits files
    TakeSnapshotUtils.verifyRecoveredEdits(fs, snapshotDir, found, snapshot);

  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    String version = null;
    if (fs.exists(versionFile)) {
      FSDataInputStream s = fs.open(versionFile);
      try {
        // static DataInputStream.readUTF decodes the modified-UTF-8 string
        version = DataInputStream.readUTF(s);
      } catch (EOFException eof) {
        LOG.warn("Version file was empty, odd, will try to set it.");
      } finally {
        s.close();
      }
    }
    return version;
  }
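The warning above hints at a matching writer that sets the version when the file is empty. A hedged sketch of such a writer, assuming it stores the version string with writeUTF so the readUTF call above can decode it (the actual setter in the surrounding utility class may differ):

  public static void setVersion(FileSystem fs, Path rootdir, String version)
  throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    // overwrite any existing (possibly empty) version file
    FSDataOutputStream s = fs.create(versionFile, true);
    try {
      s.writeUTF(version); // same modified-UTF-8 encoding readUTF expects
    } finally {
      s.close();
    }
  }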

  public static String getClusterId(FileSystem fs, Path rootdir)
      throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    String clusterId = null;
    if (fs.exists(idPath)) {
      FSDataInputStream in = fs.open(idPath);
      try {
        clusterId = in.readUTF();
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file "+idPath.toString()+" was empty");
      } finally{
        in.close();
      }
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;

      // skip this snapshot if its info file is missing (bad snapshot)
      if (!fs.exists(info)) {
        LOG.error("Snapshot information for " + snapshot.getPath() + " doesn't exist");
        continue;
      }
      FSDataInputStream in = null;
      try {
        in = fs.open(info);
        SnapshotDescription desc = SnapshotDescription.parseFrom(in);
        snapshotDescs.add(desc);
      } catch (IOException e) {
        LOG.warn("Found a corrupted snapshot " + snapshot.getPath(), e);
      } finally {
        if (in != null) {
          in.close();
        }
      }
    }
    return snapshotDescs;
  }

    public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
        CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
        boolean closeIStream) throws IOException {
      super(path);

      // Open the link twice: through the regular (checksummed) filesystem and,
      // when running on an HFileSystem, through its checksum-free twin so HBase
      // can skip redundant HDFS-level checksum verification.
      FSDataInputStream in = hfileLink.open(fs);
      FSDataInputStream inNoChecksum = in;
      if (fs instanceof HFileSystem) {
        FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
        inNoChecksum = hfileLink.open(noChecksumFs);
      }

  /**
   * Load the HRegionInfo from the .regioninfo file in the given region directory.
   */
  public static HRegionInfo loadDotRegionInfoFileContent(final FileSystem fs, final Path dir)
  throws IOException {
    Path regioninfo = new Path(dir, HRegion.REGIONINFO_FILE);
    if (!fs.exists(regioninfo)) throw new FileNotFoundException(regioninfo.toString());
    FSDataInputStream in = fs.open(regioninfo);
    try {
      HRegionInfo hri = new HRegionInfo();
      hri.readFields(in);
      return hri;
    } finally {
      in.close();
    }
  }

          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
        int numBlocksRead = 0;
        long pos = 0;
        while (pos < totalSize) {
          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
          b.sanityCheck();
          pos += block.length;
          numBlocksRead++;
        }
        assertEquals(numBlocks, numBlocksRead);
        is.close();
      }
    }
  }
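The pread flag exercised by this test selects positioned reads. FSDataInputStream implements PositionedReadable, so a caller can fetch bytes at an absolute offset without moving the stream's current position; a minimal sketch (the offset and length are placeholders):

  // Positioned read: fetch 64 bytes at absolute offset 1024 without
  // disturbing the stream's current position (is.getPos() is unchanged).
  FSDataInputStream is = fs.open(path);
  try {
    byte[] chunk = new byte[64];
    is.readFully(1024L, chunk);
  } finally {
    is.close();
  }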