Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataOutputStream$Buffer


      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
      // Can we get at the DFSClient output stream? If the writer is a
      // SequenceFileLogWriter (SFLW), it has already done the reflection
      // needed to reach the protected stream field.
      FSDataOutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
      }

      synchronized (updateLock) {
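Where a writer does not expose its underlying stream, the reflection that SequenceFileLogWriter performs can be sketched as follows. This is a hedged illustration rather than the HBase code: the field name "out" is an assumption about SequenceFile.Writer's internals and may differ across Hadoop versions.

  import java.lang.reflect.Field;

  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.io.SequenceFile;

  // Reflectively pull the FSDataOutputStream out of a SequenceFile.Writer.
  // Returns null if the internals do not match this Hadoop version.
  public static FSDataOutputStream getUnderlyingStream(SequenceFile.Writer writer) {
    try {
      Field outField = SequenceFile.Writer.class.getDeclaredField("out"); // assumed name
      outField.setAccessible(true);
      return (FSDataOutputStream) outField.get(writer);
    } catch (ReflectiveOperationException e) {
      return null;
    }
  }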


        context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());

        // Ensure that the output folder is there and copy the file
        outputFs.mkdirs(outputPath.getParent());
        FSDataOutputStream out = outputFs.create(outputPath, true);
        try {
          if (!copyData(context, inputPath, in, outputPath, out, inputStat.getLen()))
            return false;
        } finally {
          out.close();
        }

        // Preserve attributes
        return preserveAttributes(outputPath, inputStat);
      } finally {
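A minimal sketch of what a copyData() helper like the one invoked above could contain, reduced to the stream copy itself; the method name, buffer size, and return convention are illustrative assumptions, not the actual implementation:

  // Copy everything from in to out; report whether the byte count matched.
  private static boolean copyStream(java.io.InputStream in, FSDataOutputStream out,
      long expectedLen) throws IOException {
    byte[] buffer = new byte[64 * 1024];
    long copied = 0;
    int n;
    while ((n = in.read(buffer)) > 0) {
      out.write(buffer, 0, n);
      copied += n;
    }
    return copied == expectedLen; // a short copy signals failure to the caller
  }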

      throws IOException {
    FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
      HConstants.DATA_FILE_UMASK_KEY);
    Path snapshotInfo = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
    try {
      FSDataOutputStream out = FSUtils.create(fs, snapshotInfo, perms, true);
      try {
        snapshot.writeTo(out);
      } finally {
        out.close();
      }
    } catch (IOException e) {
      // if we get an exception, try to remove the snapshot info
      if (!fs.delete(snapshotInfo, false)) {
        String msg = "Couldn't delete snapshot info file: " + snapshotInfo;
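FSUtils.create is an HBase helper; the same create-with-permissions flow can be sketched against the public FileSystem API alone (the buffer size is illustrative, and the Path-taking default-replication and block-size accessors assume a Hadoop 2-era FileSystem):

  FSDataOutputStream out = fs.create(snapshotInfo, perms, true /* overwrite */,
      4096, fs.getDefaultReplication(snapshotInfo),
      fs.getDefaultBlockSize(snapshotInfo), null /* no progress callback */);
  try {
    snapshot.writeTo(out);
  } finally {
    out.close();
  }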

    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);

    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
      FSDataOutputStream outStream = hdfs.append(dstPath);
      writer = SequenceFile.createWriter(conf, outStream, fmt.getKeyClass(),
          fmt.getValueClass(), compType, codeC);
    } else {
      writer = SequenceFile.createWriter(hdfs, conf, dstPath,
          fmt.getKeyClass(), fmt.getValueClass(), compType, codeC);
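The append-or-create decision above isolates into a small helper; a sketch using the same configuration key, with the rest illustrative:

  // Reuse an existing file via append when the cluster supports it,
  // otherwise create (which overwrites by default).
  static FSDataOutputStream openForWrite(FileSystem fs, Configuration conf, Path p)
      throws IOException {
    if (conf.getBoolean("hdfs.append.support", false) && fs.isFile(p)) {
      return fs.append(p);
    }
    return fs.create(p);
  }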

  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        FSDataOutputStream s = fs.create(versionFile);
        s.writeUTF(version);
        LOG.debug("Created version file at " + rootdir.toString() +
            " set its version at:" + version);
        s.close();
        return;
      } catch (IOException e) {
        if (retries > 0) {
          LOG.warn("Unable to create version file at " + rootdir.toString() +
              ", retrying: " + e.getMessage());
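The retry branch is cut off above. A hedged sketch of how such a loop typically continues: sleep for wait milliseconds, decrement retries, and rethrow once attempts are exhausted (this is an assumption about the elided code, not a quote of it):

          try {
            Thread.sleep(wait); // back off before the next attempt
          } catch (InterruptedException ie) {
            throw (IOException) new java.io.InterruptedIOException().initCause(ie);
          }
          retries--;
        } else {
          throw e; // out of retries, surface the failure
        }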

  public static void setClusterId(FileSystem fs, Path rootdir, String clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        FSDataOutputStream s = fs.create(filePath);
        s.writeUTF(clusterId);
        s.close();
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + filePath.toString() +
              " with ID: " + clusterId);
        }
        return;
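writeUTF() frames the string with a two-byte length prefix in modified UTF-8, so the file must be read back with the matching readUTF(). A minimal round-trip sketch (variable names illustrative):

  FSDataOutputStream out = fs.create(filePath);
  out.writeUTF(clusterId);
  out.close();

  FSDataInputStream in = fs.open(filePath);
  String readBack = in.readUTF(); // matches the writeUTF framing above
  in.close();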

    // Hence delete and recreate the file if it exists.
    if (FSUtils.isExists(fs, tmpPath)) {
      FSUtils.delete(fs, tmpPath, true);
    }

    FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);

    try {
      regionInfo.write(out);
      out.write('\n');
      out.write('\n');
      out.write(Bytes.toBytes(regionInfo.toString()));
    } finally {
      out.close();
    }
    if (!fs.rename(tmpPath, regioninfoPath)) {
      throw new IOException("Unable to rename " + tmpPath + " to " +
        regioninfoPath);
    }
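The write-to-temp-then-rename pattern above is what makes the .regioninfo file appear atomically: HDFS rename is a single metadata operation, so readers see either the complete old file or the complete new one, never a partial write. Stripped to its essentials (payload is an illustrative stand-in for the serialized region info):

  FSDataOutputStream out = fs.create(tmpPath, true);
  try {
    out.write(payload); // all bytes land at the temporary path first
  } finally {
    out.close();
  }
  if (!fs.rename(tmpPath, regioninfoPath)) { // publish in one operation
    throw new IOException("Unable to rename " + tmpPath + " to " + regioninfoPath);
  }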

      for (boolean pread : new boolean[] { false, true }) {
        byte[] block = createTestV1Block(algo);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v1_" + algo);
        LOG.info("Creating temporary file at " + path);
        FSDataOutputStream os = fs.create(path);
        int totalSize = 0;
        int numBlocks = 50;
        for (int i = 0; i < numBlocks; ++i) {
          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
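A sanity check one could bolt onto the write loop above: after close(), the length the FileSystem reports should equal the totalSize accumulator (a hedged helper sketch, not part of the original test):

  FileStatus status = fs.getFileStatus(path);
  if (status.getLen() != totalSize) {
    throw new IOException("Expected " + totalSize + " bytes at " + path +
        ", found " + status.getLen());
  }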

      for (boolean pread : new boolean[] { false, true }) {
        LOG.info("testReaderV2: Compression algorithm: " + algo +
            ", pread=" + pread);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo);
        FSDataOutputStream os = fs.create(path);
        HFileBlock.Writer hbw = new HFileBlock.Writer(algo, null,
            includesMemstoreTS,
            HFileReaderV2.MAX_MINOR_VERSION,
            HFile.DEFAULT_CHECKSUM_TYPE,
            HFile.DEFAULT_BYTES_PER_CHECKSUM);
        long totalSize = 0;
        for (int blockId = 0; blockId < 2; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          for (int i = 0; i < 1234; ++i)
            dos.writeInt(i);
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
            totalSize);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
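The pread flag toggles between the two read paths FSDataInputStream offers: positional reads, which leave the stream offset untouched, and seek-then-read. A minimal sketch of the distinction (the 33-byte length is an illustrative header size, not the real constant):

  FSDataInputStream is = fs.open(path);
  byte[] header = new byte[33];  // illustrative size only
  is.readFully(0, header);       // pread: positional, offset unchanged
  is.seek(0);                    // non-pread: move the pointer...
  is.readFully(header);          // ...then read sequentially
  is.close();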

    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
      for (boolean pread : new boolean[] { false, true }) {
        for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
          Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
              + algo + "_" + encoding.toString());
          FSDataOutputStream os = fs.create(path);
          HFileDataBlockEncoder dataBlockEncoder =
              new HFileDataBlockEncoderImpl(encoding);
          HFileBlock.Writer hbw = new HFileBlock.Writer(algo, dataBlockEncoder,
              includesMemstoreTS,
              HFileReaderV2.MAX_MINOR_VERSION,
              HFile.DEFAULT_CHECKSUM_TYPE,
              HFile.DEFAULT_BYTES_PER_CHECKSUM);
          long totalSize = 0;
          final List<Integer> encodedSizes = new ArrayList<Integer>();
          final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            DataOutputStream dos = hbw.startWriting(BlockType.DATA);
            writeEncodedBlock(encoding, dos, encodedSizes, encodedBlocks,
                blockId, includesMemstoreTS);

            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          hbr.setDataBlockEncoder(dataBlockEncoder);
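A closing note that applies to every snippet on this page: FSDataOutputStream is Closeable, so on Java 7 and later the explicit try/finally close() pairs can be collapsed into try-with-resources:

  try (FSDataOutputStream os = fs.create(path)) {
    os.write(data); // close() runs automatically, even if write() throws
  }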


