Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataOutputStream$Buffer
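The snippets below are drawn from Apache HBase test and utility code and show the common FSDataOutputStream patterns: sequential writes of test data, offset bookkeeping with getPos(), write-to-temp-then-rename, and probing lease recovery with append(). As a warm-up, here is a minimal, self-contained sketch of the basic lifecycle (the path and payload are made up for illustration): the stream comes from FileSystem.create(), is written sequentially, and reports its current byte offset via getPos().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataOutputStreamDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/fsdos-demo.bin");    // hypothetical path

    FSDataOutputStream out = fs.create(path, true); // true = overwrite
    try {
      out.write(new byte[] { 1, 2, 3, 4 });
      // getPos() returns the number of bytes written so far.
      System.out.println("position after write: " + out.getPos()); // prints 4
    } finally {
      out.close();
    }
  }
}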


  private long writeBlocks(Random rand, Compression.Algorithm compressAlgo,
      Path path, List<Long> expectedOffsets, List<Long> expectedPrevOffsets,
      List<BlockType> expectedTypes, List<ByteBuffer> expectedContents
  ) throws IOException {
    boolean cacheOnWrite = expectedContents != null;
    FSDataOutputStream os = fs.create(path);
    HFileBlock.Writer hbw = new HFileBlock.Writer(compressAlgo, null,
        includesMemstoreTS,
        HFileReaderV2.MAX_MINOR_VERSION,
        HFile.DEFAULT_CHECKSUM_TYPE,
        HFile.DEFAULT_BYTES_PER_CHECKSUM);
    Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
    long totalSize = 0;
    for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
      long pos = os.getPos();
      int blockTypeOrdinal = rand.nextInt(BlockType.values().length);
      // ENCODED_DATA blocks are produced through the encoder path rather than
      // written directly, so fall back to a plain DATA block here.
      if (blockTypeOrdinal == BlockType.ENCODED_DATA.ordinal()) {
        blockTypeOrdinal = BlockType.DATA.ordinal();
      }
      BlockType bt = BlockType.values()[blockTypeOrdinal];
      DataOutputStream dos = hbw.startWriting(bt);
      int size = rand.nextInt(500);
      for (int j = 0; j < size; ++j) {
        // This might compress well.
        dos.writeShort(i + 1);
        dos.writeInt(j + 1);
      }

      if (expectedOffsets != null)
        expectedOffsets.add(os.getPos());

      if (expectedPrevOffsets != null) {
        Long prevOffset = prevOffsetByType.get(bt);
        expectedPrevOffsets.add(prevOffset != null ? prevOffset : -1);
        prevOffsetByType.put(bt, os.getPos());
      }

      expectedTypes.add(bt);

      hbw.writeHeaderAndData(os);
      totalSize += hbw.getOnDiskSizeWithHeader();

      if (cacheOnWrite)
        expectedContents.add(hbw.getUncompressedBufferWithHeader());

      if (detailedLogging) {
        LOG.info("Written block #" + i + " of type " + bt
            + ", uncompressed size " + hbw.getUncompressedSizeWithoutHeader()
            + " at offset " + pos);
      }
    }
    os.close();
    LOG.info("Created a temporary file at " + path + ", "
        + fs.getFileStatus(path).getLen() + " byte, compression=" +
        compressAlgo);
    return totalSize;
  }
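The helper above leans on a simple invariant: FSDataOutputStream.getPos(), sampled just before a block is flushed, is that block's start offset in the file (the preceding dos writes only fill the block writer's internal buffer). In isolation (serializedBlock stands in for one block's bytes):

  long blockOffset = os.getPos();   // start offset of the block about to be written
  os.write(serializedBlock);        // flush the block to the stream
  long nextOffset = os.getPos();    // == blockOffset + serializedBlock.length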


      for (boolean pread : new boolean[] { false, true }) {
        byte[] block = createTestV1Block(algo);
        Path path = new Path(TEST_UTIL.getDataTestDir(),
            "blocks_v1_" + algo);
        LOG.info("Creating temporary file at " + path);
        FSDataOutputStream os = fs.create(path);
        int totalSize = 0;
        int numBlocks = 50;
        for (int i = 0; i < numBlocks; ++i) {
          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
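The pread flag iterated over here selects between the two read styles FSDataInputStream offers: a stateful seek() followed by read(), and a stateless positional read ("pread") that leaves the file pointer untouched and is safe for concurrent readers sharing one stream. A minimal sketch (offset, len and buf are assumptions):

  byte[] buf = new byte[len];

  // Stateful: moves the stream's file pointer.
  is.seek(offset);
  is.readFully(buf, 0, len);

  // Positional "pread": reads at an absolute offset, pointer unchanged.
  is.readFully(offset, buf, 0, len);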

      for (boolean pread : new boolean[] { false, true }) {
          LOG.info("testReaderV2: Compression algorithm: " + algo +
                   ", pread=" + pread);
        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo);
        FSDataOutputStream os = fs.create(path);
        Writer hbw = new Writer(algo, null,
            includesMemstoreTS);
        long totalSize = 0;
        for (int blockId = 0; blockId < 2; ++blockId) {
          DataOutputStream dos = hbw.startWriting(BlockType.DATA);
          for (int i = 0; i < 1234; ++i)
            dos.writeInt(i);
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, is, algo,
            totalSize, MINOR_VERSION, fs, path);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);

          LOG.info("testDataBlockEncoding algo " + algo +
                   " pread = " + pread +
                   " encoding " + encoding);
          Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
              + algo + "_" + encoding.toString());
          FSDataOutputStream os = fs.create(path);
          HFileDataBlockEncoder dataBlockEncoder =
              new HFileDataBlockEncoderImpl(encoding);
          Writer hbw = new Writer(algo, dataBlockEncoder,
              includesMemstoreTS);
          long totalSize = 0;
          final List<Integer> encodedSizes = new ArrayList<Integer>();
          final List<ByteBuffer> encodedBlocks = new ArrayList<ByteBuffer>();
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            DataOutputStream dos = hbw.startWriting(BlockType.DATA);
            TestHFileBlock.writeEncodedBlock(encoding, dos, encodedSizes, encodedBlocks,
                blockId, includesMemstoreTS);

            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, is, algo,
              totalSize, MINOR_VERSION, fs, path);
          hbr.setDataBlockEncoder(dataBlockEncoder);

      Path regionEditsDir = HLog.getRegionDirRecoveredEditsDir(new Path(tableDir, region));
      long seqId = System.currentTimeMillis();
      for (int i = 0; i < 3; ++i) {
        String editName = String.format("%019d", seqId + i);
        recoverEdits.add(editName);
        FSDataOutputStream stream = fs.create(new Path(regionEditsDir, editName));
        stream.write(Bytes.toBytes("test"));
        stream.close();
      }
    }
  }

      Path serverLogDir = new Path(logDir, server);
      fs.mkdirs(serverLogDir);
      for (int i = 0; i < 5; ++i) {
        String logfile = server + '.' + i;
        logs.add(logfile);
        FSDataOutputStream stream = fs.create(new Path(serverLogDir, logfile));
        stream.write(Bytes.toBytes("test"));
        stream.close();
      }
    }
  }
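Both loops above use a bare create/write/close sequence, so a failed write() would leak the open stream. FSDataOutputStream is Closeable, so on Java 7+ the same write is tighter and exception-safe with try-with-resources, e.g. for the log-file loop:

        try (FSDataOutputStream stream = fs.create(new Path(serverLogDir, logfile))) {
          stream.write(Bytes.toBytes("test"));
        }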

    byte[] data = new byte[4096];
    for (int i = 0; i < data.length; i++) {
      data[i] = v;
    }

    FSDataOutputStream stream = fs.create(path);
    try {
      long written = 0;
      while (written < size) {
        // Clamp the final chunk so the file ends up exactly `size` bytes long.
        int len = (int) Math.min(data.length, size - written);
        stream.write(data, 0, len);
        written += len;
      }
    } finally {
      stream.close();
    }
  }
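Only the method body is shown above; the enclosing helper evidently takes the fill byte v and the target size as parameters, alongside fs and path. Assuming a signature along the lines of writeFile(FileSystem fs, Path path, byte v, long size) (the name is hypothetical), creating a 1 MiB file of 0x2A bytes would look like:

    writeFile(fs, new Path("/tmp/filler.bin"), (byte) 0x2A, 1024L * 1024L);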

      }
      HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf);
      // Can we get at the dfsclient outputstream? If this is an instance of
      // SequenceFileLogWriter (SFLW), it will have done the necessary
      // reflection to get at the protected field name.
      FSDataOutputStream nextHdfsOut = null;
      if (nextWriter instanceof SequenceFileLogWriter) {
        nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
      }
      // Tell our listeners that a new log was created
      if (!this.listeners.isEmpty()) {

    // Hence delete and recreate the file if it already exists.
    if (FSUtils.isExists(fs, tmpPath)) {
      FSUtils.delete(fs, tmpPath, true);
    }

    FSDataOutputStream out = FSUtils.create(fs, tmpPath, perms);

    try {
      this.regionInfo.write(out);
      out.write('\n');
      out.write('\n');
      out.write(Bytes.toBytes(this.regionInfo.toString()));
    } finally {
      out.close();
    }
    if (!fs.rename(tmpPath, regioninfoPath)) {
      throw new IOException("Unable to rename " + tmpPath + " to " +
        regioninfoPath);
    }
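This is the standard write-to-temp-then-rename idiom: the .regioninfo content is fully written and closed under tmpPath, and only then swapped into its final name, so readers never observe a half-written file. Note that FileSystem.rename() signals failure by returning false rather than throwing, hence the explicit check. The bare pattern looks like this (names are illustrative):

    Path tmp = new Path(dir, ".regioninfo.tmp");
    Path dst = new Path(dir, ".regioninfo");

    FSDataOutputStream out = fs.create(tmp, true);
    try {
      out.write(payload);            // write everything to the temp file
    } finally {
      out.close();
    }
    if (!fs.rename(tmp, dst)) {      // swap into place once fully written
      throw new IOException("Unable to rename " + tmp + " to " + dst);
    }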

    }
    // Try to recover the lease: append() succeeds only once the previous
    // writer's lease on the file has been released.
    boolean recovered = false;
    while (!recovered) {
      try {
        FSDataOutputStream out = fs.append(p);
        out.close();
        recovered = true;
      } catch (IOException e) {
        LOG.info("Failed open for append, waiting on lease recovery: " + p, e);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ex) {
          // Ignore the interrupt and keep waiting for lease recovery.
        }
      }
    }
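The loop works because HDFS refuses append() on a file whose previous writer still holds the lease, so a successful append()+close() doubles as a recovery probe. A self-contained variant with a bounded number of attempts (the helper name is an assumption; the one-second backoff is carried over from the snippet):

  // Returns true once fs.append(p) succeeds, i.e. the old lease is gone;
  // gives up after maxAttempts tries.
  static boolean waitForLeaseRecovery(FileSystem fs, Path p, int maxAttempts)
      throws InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
      try {
        fs.append(p).close();   // succeeds only when the lease is free
        return true;
      } catch (IOException e) {
        Thread.sleep(1000);     // back off, then retry
      }
    }
    return false;
  }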
