Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream

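FSDataInputStream is the stream type returned by FileSystem.open(): a seekable input stream that also supports positional reads (PositionedReadable). Before the project-specific fragments below, here is a minimal self-contained sketch of that core API; the file path and buffer sizes are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataInputStreamSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/tmp/example.dat");  // hypothetical test file

    FSDataInputStream in = fs.open(path);
    try {
      byte[] buf = new byte[4096];

      // Sequential read: advances the stream position.
      int n = in.read(buf);
      System.out.println("read " + n + " bytes, pos=" + in.getPos());

      // Seekable: jump back to an absolute offset.
      in.seek(0);

      // Positional read ("pread"): reads at an absolute offset without
      // moving the stream position, which makes it safe to use from
      // multiple threads sharing one stream.
      int m = in.read(0L, buf, 0, buf.length);
      System.out.println("pread " + m + " bytes, pos still " + in.getPos());
    } finally {
      in.close();
    }
  }
}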

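The first fragment, apparently from an HBase HFileBlock test, writes a series of identical compressed blocks, then reopens the file with fs.open() and walks it with a version-1 block reader, sanity-checking each block and asserting that the expected number of blocks was read: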
          os.write(block);
          totalSize += block.length;
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV1(is, algo,
            totalSize);
        HFileBlock b;
        int numBlocksRead = 0;
        long pos = 0;
        while (pos < totalSize) {
          b = hbr.readBlockData(pos, block.length, uncompressedSizeV1, pread);
          b.sanityCheck();
          pos += block.length;
          numBlocksRead++;
        }
        assertEquals(numBlocks, numBlocksRead);
        is.close();
      }
    }
  }


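This fragment exercises the version-2 block reader. It first reads a block without knowing its on-disk size (the -1 arguments), checks the checksum-failure counter and the expected on-disk/uncompressed sizes, and then, for GZ only, re-reads the block with its exact on-disk size and verifies that supplying a wrong size raises an IOException with the expected message prefix: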
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
            totalSize);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();
        assertEquals(0, HFile.getChecksumFailuresCount());

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(is, algo, totalSize);
          b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
          try {
            b = hbr.readBlockData(0, wrongCompressedSize
                + HFileBlock.HEADER_SIZE_WITH_CHECKSUMS, -1, pread);
            fail("Exception expected");
          } catch (IOException ex) {
            String expectedPrefix = "On-disk size without header provided is "
                + wrongCompressedSize + ", but block header contains "
                + b.getOnDiskSizeWithoutHeader() + ".";
            assertTrue("Invalid exception message: '" + ex.getMessage()
                + "'.\nMessage is expected to start with: '" + expectedPrefix
                + "'", ex.getMessage().startsWith(expectedPrefix));
          }
          is.close();
        }
      }
    }
  }

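Here the version-2 reader is combined with a data block encoder. Each block is read back through the FSDataInputStream, the two-byte encoding id is stripped from the returned buffer, and the remaining bytes are compared against the expected encoded contents: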
            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
          int pos = 0;
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            b = hbr.readBlockData(pos, -1, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            b.sanityCheck();
            pos += b.getOnDiskSizeWithHeader();

            assertEquals((int) encodedSizes.get(blockId),
                b.getUncompressedSizeWithoutHeader());
            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
            if (encoding != DataBlockEncoding.NONE) {
              // We expect a two-byte big-endian encoding id.
              assertEquals(0, actualBuffer.get(0));
              assertEquals(encoding.getId(), actualBuffer.get(1));
              actualBuffer.position(2);
              actualBuffer = actualBuffer.slice();
            }

            ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
            expectedBuffer.rewind();

            // Verify that the contents match; assertBuffersEqual produces a
            // descriptive failure message.
            assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
                pread);
          }
          is.close();
        }
      }
    }
  }

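This longer fragment reads back a file of randomly generated test blocks and verifies, for each block, the stream position (in non-pread mode), the recorded offset, the block type, and the previous-block offset. Each block is then re-read with its now-known on-disk size to exercise a different branch of the loader, and in cache-on-write mode the raw bytes, minus checksums, are compared against the expected contents: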
          List<ByteBuffer> expectedContents = cacheOnWrite
              ? new ArrayList<ByteBuffer>() : null;
          long totalSize = writeBlocks(rand, algo, path, expectedOffsets,
              expectedPrevOffsets, expectedTypes, expectedContents);

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, algo,
              totalSize);
          long curOffset = 0;
          for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
            if (!pread) {
              assertEquals(is.getPos(), curOffset + (i == 0 ? 0 :
                  HFileBlock.HEADER_SIZE_WITH_CHECKSUMS));
            }

            assertEquals(expectedOffsets.get(i).longValue(), curOffset);
            if (detailedLogging) {
              LOG.info("Reading block #" + i + " at offset " + curOffset);
            }
            HFileBlock b = hbr.readBlockData(curOffset, -1, -1, pread);
            if (detailedLogging) {
              LOG.info("Block #" + i + ": " + b);
            }
            assertEquals("Invalid block #" + i + "'s type:",
                expectedTypes.get(i), b.getBlockType());
            assertEquals("Invalid previous block offset for block " + i
                + " of " + "type " + b.getBlockType() + ":",
                (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset());
            b.sanityCheck();
            assertEquals(curOffset, b.getOffset());

            // Now re-load this block knowing the on-disk size. This tests a
            // different branch in the loader.
            HFileBlock b2 = hbr.readBlockData(curOffset,
                b.getOnDiskSizeWithHeader(), -1, pread);
            b2.sanityCheck();

            assertEquals(b.getBlockType(), b2.getBlockType());
            assertEquals(b.getOnDiskSizeWithoutHeader(),
                b2.getOnDiskSizeWithoutHeader());
            assertEquals(b.getOnDiskSizeWithHeader(),
                b2.getOnDiskSizeWithHeader());
            assertEquals(b.getUncompressedSizeWithoutHeader(),
                b2.getUncompressedSizeWithoutHeader());
            assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset());
            assertEquals(curOffset, b2.getOffset());
            assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
            assertEquals(b.getOnDiskDataSizeWithHeader(),
                         b2.getOnDiskDataSizeWithHeader());
            assertEquals(0, HFile.getChecksumFailuresCount());

            curOffset += b.getOnDiskSizeWithHeader();

            if (cacheOnWrite) {
              // In cache-on-write mode we store the uncompressed bytes, so we
              // can compare them to what the block reader returned. Note that
              // b's buffer holds header + data + checksum, while
              // expectedContents holds header + data only.
              ByteBuffer bufRead = b.getBufferWithHeader();
              ByteBuffer bufExpected = expectedContents.get(i);
              boolean bytesAreCorrect = Bytes.compareTo(bufRead.array(),
                  bufRead.arrayOffset(),
                  bufRead.limit() - b.totalChecksumBytes(),
                  bufExpected.array(), bufExpected.arrayOffset(),
                  bufExpected.limit()) == 0;
              String wrongBytesMsg = "";

              if (!bytesAreCorrect) {
                // Optimization: only construct the error message if we
                // actually need it.
                wrongBytesMsg = "Expected bytes in block #" + i + " (algo="
                    + algo + ", pread=" + pread
                    + ", cacheOnWrite=" + cacheOnWrite + "):\n";
                wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(),
                    bufExpected.arrayOffset(), Math.min(32,
                        bufExpected.limit()))
                    + ", actual:\n"
                    + Bytes.toStringBinary(bufRead.array(),
                        bufRead.arrayOffset(), Math.min(32, bufRead.limit()));
                if (detailedLogging) {
                  LOG.warn("expected header" +
                           HFileBlock.toStringHeader(bufExpected) +
                           "\nfound    header" +
                           HFileBlock.toStringHeader(bufRead));
                  LOG.warn("bufread offset " + bufRead.arrayOffset() +
                           " limit " + bufRead.limit() +
                           " expected offset " + bufExpected.arrayOffset() +
                           " limit " + bufExpected.limit());
                  LOG.warn(wrongBytesMsg);
                }
              }
              assertTrue(wrongBytesMsg, bytesAreCorrect);
            }
          }

          assertEquals(curOffset, fs.getFileStatus(path).getLen());
          is.close();
        }
      }
    }
  }

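A concurrency test: several reader threads share one block reader, and therefore one FSDataInputStream, submitting their work through an ExecutorCompletionService and asserting that every reader succeeds, presumably relying on positional reads that do not share a mutable stream position: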
          new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
      Random rand = defaultRandom();
      List<Long> offsets = new ArrayList<Long>();
      List<BlockType> types = new ArrayList<BlockType>();
      writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
      FSDataInputStream is = fs.open(path);
      long fileSize = fs.getFileStatus(path).getLen();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo,
          fileSize);

      Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
      ExecutorCompletionService<Boolean> ecs =
          new ExecutorCompletionService<Boolean>(exec);

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
            offsets, types, fileSize));
      }

      for (int i = 0; i < NUM_READER_THREADS; ++i) {
        Future<Boolean> result = ecs.take();
        assertTrue(result.get());
        if (detailedLogging) {
          LOG.info(String.valueOf(i + 1)
            + " reader threads finished successfully (algo=" + compressAlgo
            + ")");
        }
      }

      is.close();
    }
  }

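The next fragment repeats the version-2 read test with a later constructor that also takes a minor version, the file system, and the path. The same FSDataInputStream is passed twice, apparently serving as both the checksum-verified stream and the stream used when HDFS-level checksum verification is bypassed; note also HEADER_SIZE_NO_CHECKSUM in place of HEADER_SIZE_WITH_CHECKSUMS: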
          hbw.writeHeaderAndData(os);
          totalSize += hbw.getOnDiskSizeWithHeader();
        }
        os.close();

        FSDataInputStream is = fs.open(path);
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, is, algo,
            totalSize, MINOR_VERSION, fs, path);
        HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
        is.close();

        b.sanityCheck();
        assertEquals(4936, b.getUncompressedSizeWithoutHeader());
        assertEquals(algo == GZ ? 2173 : 4936,
                     b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
        String blockStr = b.toString();

        if (algo == GZ) {
          is = fs.open(path);
          hbr = new HFileBlock.FSReaderV2(is, is, algo, totalSize, MINOR_VERSION,
                                          fs, path);
          b = hbr.readBlockData(0, 2173 + HFileBlock.HEADER_SIZE_NO_CHECKSUM +
                                b.totalChecksumBytes(), -1, pread);
          assertEquals(blockStr, b.toString());
          int wrongCompressedSize = 2172;
          try {
            b = hbr.readBlockData(0, wrongCompressedSize
                + HFileBlock.HEADER_SIZE_NO_CHECKSUM, -1, pread);
            fail("Exception expected");
          } catch (IOException ex) {
            String expectedPrefix = "On-disk size without header provided is "
                + wrongCompressedSize + ", but block header contains "
                + b.getOnDiskSizeWithoutHeader() + ".";
            assertTrue("Invalid exception message: '" + ex.getMessage()
                + "'.\nMessage is expected to start with: '" + expectedPrefix
                + "'", ex.getMessage().startsWith(expectedPrefix));
          }
          is.close();
        }
      }
    }
  }

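The encoded-block test again, this time against the two-stream FSReaderV2 constructor: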
            hbw.writeHeaderAndData(os);
            totalSize += hbw.getOnDiskSizeWithHeader();
          }
          os.close();

          FSDataInputStream is = fs.open(path);
          HFileBlock.FSReaderV2 hbr = new HFileBlock.FSReaderV2(is, is, algo,
              totalSize, MINOR_VERSION, fs, path);
          hbr.setDataBlockEncoder(dataBlockEncoder);
          hbr.setIncludesMemstoreTS(includesMemstoreTS);

          HFileBlock b;
          int pos = 0;
          for (int blockId = 0; blockId < numBlocks; ++blockId) {
            b = hbr.readBlockData(pos, -1, -1, pread);
            b.sanityCheck();
            pos += b.getOnDiskSizeWithHeader();

            assertEquals((int) encodedSizes.get(blockId),
                b.getUncompressedSizeWithoutHeader());
            ByteBuffer actualBuffer = b.getBufferWithoutHeader();
            if (encoding != DataBlockEncoding.NONE) {
              // We expect a two-byte big-endian encoding id.
              assertEquals(0, actualBuffer.get(0));
              assertEquals(encoding.getId(), actualBuffer.get(1));
              actualBuffer.position(2);
              actualBuffer = actualBuffer.slice();
            }

            ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
            expectedBuffer.rewind();

            // Verify that the contents match; assertBuffersEqual produces a
            // descriptive failure message.
            TestHFileBlock.assertBuffersEqual(expectedBuffer, actualBuffer, algo, encoding,
                pread);
          }
          is.close();
        }
      }
    }
  }

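The remaining fragments come from FileLink tests. FileLink.open(fs) returns an FSDataInputStream that keeps reading even when the underlying file is renamed mid-read, here from its original location to an archive location: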
    List<Path> files = new ArrayList<Path>();
    files.add(originalPath);
    files.add(archivedPath);

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      long size = 0;

      // Read from origin
      int n = in.read(data);
      dataVerify(data, n, (byte)2);
      size += n;

      // Move origin to archive
      assertFalse(fs.exists(archivedPath));
      fs.rename(originalPath, archivedPath);
      assertFalse(fs.exists(originalPath));
      assertTrue(fs.exists(archivedPath));

      // Try to read to the end
      while ((n = in.read(data)) > 0) {
        dataVerify(data, n, (byte)2);
        size += n;
      }

      assertEquals(256 << 20, size);
    } finally {
      in.close();
      if (fs.exists(originalPath)) fs.delete(originalPath, true);
      if (fs.exists(archivedPath)) fs.delete(archivedPath, true);
    }
  }

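Finally, a FileLink spanning several files: as each underlying file is deleted, the stream transparently switches to the next available candidate, and once none remain, a read either returns end-of-stream or throws FileNotFoundException: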
        writeSomeData(fs, path, 1 << 20, (byte)i);
        files.add(path);
      }

      FileLink link = new FileLink(files);
      FSDataInputStream in = link.open(fs);
      try {
        byte[] data = new byte[8192];
        int n;

        // Switch to file 1
        n = in.read(data);
        dataVerify(data, n, (byte)0);
        fs.delete(files.get(0), true);
        skipBuffer(in, (byte)0);

        // Switch to file 2
        n = in.read(data);
        dataVerify(data, n, (byte)1);
        fs.delete(files.get(1), true);
        skipBuffer(in, (byte)1);

        // Switch to file 3
        n = in.read(data);
        dataVerify(data, n, (byte)2);
        fs.delete(files.get(2), true);
        skipBuffer(in, (byte)2);

        // No more files available
        try {
          n = in.read(data);
          assertTrue("expected end-of-stream once all files are gone", n <= 0);
        } catch (FileNotFoundException e) {
          // Expected: every underlying file has been deleted by now.
        }
      } finally {
        in.close();
      }
    } finally {
      testUtil.shutdownMiniCluster();
    }
  }
