Examples of FSDataInputStream
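
This page collects real-world uses of org.apache.hadoop.fs.FSDataInputStream from the HBase code base. As a baseline before those snippets, the sketch below shows the core API on its own: opening a file through a FileSystem, sequential reads combined with seek()/getPos(), and a positional read ("pread") at an absolute offset that does not move the stream pointer. The path, offsets, and sizes here are illustrative assumptions, not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataInputStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.dat"); // illustrative path, not from the snippets below

    FSDataInputStream in = fs.open(path);
    try {
      // Sequential read from the current position (FSDataInputStream is a DataInputStream).
      byte[] header = new byte[16];
      in.readFully(header);

      // seek() moves the stream pointer; getPos() reports it.
      in.seek(64);
      int firstByte = in.read();
      System.out.println("byte at offset 64 = " + firstByte + ", now at " + in.getPos());

      // Positional read ("pread"): reads at an absolute offset without
      // moving the stream pointer.
      byte[] block = new byte[32];
      in.readFully(128, block, 0, block.length);
    } finally {
      in.close();
    }
  }
}

The HBase snippets below follow the same lifecycle: fs.open(path), reads (sequential or positional, depending on the pread flag), then close().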


Examples of org.apache.hadoop.fs.FSDataInputStream

    public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
        CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
        boolean closeIStream) throws IOException {
      super(path);

      FSDataInputStream in = hfileLink.open(fs);
      FSDataInputStream inNoChecksum = in;
      if (fs instanceof HFileSystem) {
        // Open a second stream on the no-checksum filesystem so data blocks
        // can be read without HDFS checksum verification (HBase verifies its
        // own HFile-level checksums).
        FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
        inNoChecksum = hfileLink.open(noChecksumFs);
      }

Examples of org.apache.hadoop.fs.FSDataInputStream

  /**
   * Reads the HRegionInfo serialized in the .regioninfo file under the given
   * region directory.
   */
  public static HRegionInfo loadDotRegionInfoFileContent(final FileSystem fs, final Path dir)
  throws IOException {
    Path regioninfo = new Path(dir, HRegion.REGIONINFO_FILE);
    if (!fs.exists(regioninfo)) throw new FileNotFoundException(regioninfo.toString());
    FSDataInputStream in = fs.open(regioninfo);
    try {
      HRegionInfo hri = new HRegionInfo();
      hri.readFields(in);
      return hri;
    } finally {
      in.close();
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
        int numBlocksRead = 0;
        long pos = 0;
        while (pos < totalSize) {
          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
          b.sanityCheck();
          pos += block.length;
          numBlocksRead++;
        }
        assertEquals(numBlocks, numBlocksRead);
        is.close();
      }
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
            totalSize);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        assertEquals(0, HFile.getChecksumFailuresCount());

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(is, algo, totalSize);
          b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
          try {
            b = hbr.readBlockData(0, wrongCompressedSize
                + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS, -1, pread);
            fail("Exception expected");
          } catch (IOException ex) {
            String expectedPrefix = "On-disk size without header provided is "
                + wrongCompressedSize + ", but block header contains "
                + b.getOnDiskSizeWithoutHeader() + ".";
            assertTrue("Invalid exception message: '" + ex.getMessage()
                + "'.\nMessage is expected to start with: '" + expectedPrefix
                + "'", ex.getMessage().startsWith(expectedPrefix));
          }
          is.close();
        }
      }
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
          int pos = 0;
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            b = hbr.readBlockData(pos, -1, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            b.sanityCheck();
            pos += b.getOnDiskSizeWithHeader();

            assertEquals((int) encodedSizes.get(blockId),
                b.getUncompressedSizeWithoutHeader());
            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
            if (encoding != DataBlockEncoding.NONE) {
              // We expect a two-byte big-endian encoding id.
              assertEquals(0, actualBuffer.get(0));
              assertEquals(encoding.getId(), actualBuffer.get(1));
              actualBuffer.position(2);
              actualBuffer = actualBuffer.slice();
            }

            ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
            expectedBuffer.rewind();

            // test if content matches, produce nice message
            assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
                pread);
          }
          is.close();
        }
      }
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

          List<ByteBuffer> expectedContents = cacheOnWrite
              ? new ArrayList<ByteBuffer>() : null;
          long totalSize = writeBlocks(rand, algo, path, expectedOffsets,
              expectedPrevOffsets, expectedTypes, expectedContents);

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          long curOffset = 0;
          for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
            if (!pread) {
              assertEquals(is.getPos(), curOffset + (i == 0 ? 0 :
                  HFileBlock.HEADER_SIZE_WITH_CHECKSUMS));
            }

            assertEquals(expectedOffsets.get(i).longValue(), curOffset);
            if (detailedLogging) {
              LOG.info("Reading block #" + i + " at offset " + curOffset);
            }
            HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread);
            if (detailedLogging) {
              LOG.info("Block #" + i + ": " + b);
            }
            assertEquals("Invalid block #" + i + "'s type:",
                expectedTypes.get(i), b.getBlockType());
            assertEquals("Invalid previous block offset for block " + i
                + " of " + "type " + b.getBlockType() + ":",
                (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset());
            b.sanityCheck();
            assertEquals(curOffset, b.getOffset());

            // Now re-load this block knowing the on-disk size. This tests a
            // different branch in the loader.
            HFileBlock b2 = hbr.readBlockData(curOffset,
                b.getOnDiskSizeWithHeader(), -1, pread);
            b2.sanityCheck();

            assertEquals(b.getBlockType(), b2.getBlockType());
            assertEquals(b.getOnDiskSizeWithoutHeader(),
                b2.getOnDiskSizeWithoutHeader());
            assertEquals(b.getOnDiskSizeWithHeader(),
                b2.getOnDiskSizeWithHeader());
            assertEquals(b.getUncompressedSizeWithoutHeader(),
                b2.getUncompressedSizeWithoutHeader());
            assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset());
            assertEquals(curOffset, b2.getOffset());
            assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
            assertEquals(b.getOnDiskDataSizeWithHeader(),
                         b2.getOnDiskDataSizeWithHeader());
            assertEquals(0, HFile.getChecksumFailuresCount());

            curOffset += b.getOnDiskSizeWithHeader();

            if (cacheOnWrite) {
              // In the cache-on-write mode we store uncompressed bytes so we
              // can compare them to what was read by the block reader.
              // b's buffer has header + data + checksum while
              // expectedContents have header + data only
              ByteBuffer bufRead = b.getBufferWithHeader();
              ByteBuffer bufExpected = expectedContents.get(i);
              boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(),
                  bufRead.arrayOffset(),
                  bufRead.limit() - b.totalChecksumBytes(),
                  bufExpected.array(), bufExpected.arrayOffset(),
                  bufExpected.limit()) == 0;
              String wrongBytesMsg = "";

              if (!bytesAreCorrect) {
                // Optimization: only construct an error message in case we
                // will need it.
                wrongBytesMsg = "Expected bytes in block #" + i + " (algo="
                    + algo + ", pread=" + pread
                    + ", cacheOnWrite=" + cacheOnWrite + "):\n";
                wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(),
                    bufExpected.arrayOffset(), Math.min(32,
                        bufExpected.limit()))
                    + ", actual:\n"
                    + Bytes.toStringBinary(bufRead.array(),
                        bufRead.arrayOffset(), Math.min(32, bufRead.limit()));
                if (detailedLogging) {
                  LOG.warn("expected header" +
                           HFileBlock.toStringHeader(bufExpected) +
                           "\nfound    header" +
                           HFileBlock.toStringHeader(bufRead));
                  LOG.warn("bufread offset " + bufRead.arrayOffset() +
                           " limit " + bufRead.limit() +
                           " expected offset " + bufExpected.arrayOffset() +
                           " limit " + bufExpected.limit());
                  LOG.warn(wrongBytesMsg);
                }
              }
              assertTrue(wrongBytesMsg, bytesAreCorrect);
            }
          }

          assertEquals(curOffset, fs.getFileStatus(path).getLen());
          is.close();
        }
      }
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

      Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
      Random rand = defaultRandom();
      List<Long> offsets = new ArrayList<Long>();
      List<BlockType> types = new ArrayList<BlockType>();
      writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
      FSDataInputStream is = fs.open(path);
      long fileSize = fs.getFileStatus(path).getLen();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo,
          fileSize);

      Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
      ExecutorCompletionService<Boolean> ecs =
          new ExecutorCompletionService<Boolean>(exec);

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
            offsets, types, fileSize));
      }

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        Future<Boolean> result = ecs.take();
        assertTrue(result.get());
        if (detailedLogging) {
          LOG.info(String.valueOf(i + 1)
            + " reader threads finished successfully (algo=" + compressAlgo
            + ")");
        }
      }

      is.close();
    }
  }
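
The concurrent-reading test above hands one shared FSDataInputStream (wrapped in an HFileBlock reader) to several threads; this works because positional reads address an absolute offset and leave the shared stream position untouched. A minimal sketch of the same pattern against the raw stream, with an assumed path and block size for illustration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcurrentPreadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    final FSDataInputStream in = fs.open(new Path("/tmp/blocks.dat")); // assumed path
    final int blockSize = 4096;                                        // assumed block size

    ExecutorService exec = Executors.newFixedThreadPool(4);
    for (int t = 0; t < 4; ++t) {
      final long offset = (long) t * blockSize;
      exec.submit(new Runnable() {
        @Override
        public void run() {
          try {
            byte[] buf = new byte[blockSize];
            // Positional read: each thread reads its own offset without
            // touching the shared stream pointer.
            in.readFully(offset, buf, 0, buf.length);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      });
    }
    exec.shutdown();
    exec.awaitTermination(1, TimeUnit.MINUTES);
    in.close();
  }
}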


Examples of org.apache.hadoop.fs.FSDataInputStream

          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, is, algo,
            totalSize, MINOR_VERSION, fs, path);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(is, is, algo, totalSize, MINOR_VERSION,
                                          fs, path);
          b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE_NO_CHECKSUM +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
          try {
            b = hbr.readBlockData(0, wrongCompressedSize
                + HFileBlock.HEADER_SIZE_NO_CHECKSUM, -1, pread);
            fail("Exception expected");
          } catch (IOException ex) {
            String expectedPrefix = "On-disk size without header provided is "
                + wrongCompressedSize + ", but block header contains "
                + b.getOnDiskSizeWithoutHeader() + ".";
            assertTrue("Invalid exception message: '" + ex.getMessage()
                + "'.\nMessage is expected to start with: '" + expectedPrefix
                + "'", ex.getMessage().startsWith(expectedPrefix));
          }
          is.close();
        }
      }
    }
  }

Examples of org.apache.hadoop.fs.FSDataInputStream

            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, is, algo,
              totalSize, MINOR_VERSION, fs, path);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
          int pos = 0;
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            b = hbr.readBlockData(pos, -1, -1, pread);
            b.sanityCheck();
            pos += b.getOnDiskSizeWithHeader();

            assertEquals((int) encodedSizes.get(blockId),
                b.getUncompressedSizeWithoutHeader());
            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
            if (encoding != DataBlockEncoding.NONE) {
              // We expect a two-byte big-endian encoding id.
              assertEquals(0, actualBuffer.get(0));
              assertEquals(encoding.getId(), actualBuffer.get(1));
              actualBuffer.position(2);
              actualBuffer = actualBuffer.slice();
            }

            ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
            expectedBuffer.rewind();

            // test if content matches, produce nice message
            TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
                pread);
          }
          is.close();
        }
      }
    }
  }