Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DistributedFileSystem.open()
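
The fragments below are drawn from Hadoop test and tool code. For orientation, here is a minimal, self-contained sketch of the underlying open-and-read pattern; the path is hypothetical and the sketch assumes fs.defaultFS points at a running HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class DfsOpenExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // When fs.defaultFS is an hdfs:// URI, this returns a DistributedFileSystem.
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream in = null;
    try {
      // open() returns an FSDataInputStream, which supports seek() and
      // positional reads in addition to the usual InputStream methods.
      in = fs.open(new Path("/tmp/example.txt"));  // hypothetical path
      IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
      IOUtils.closeStream(in);
      fs.close();
    }
  }
}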


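This fragment appears to come from an HDFS read-path test: it waits for the file to reach its target replication, reads the full contents into a byte array as a baseline, then reopens the file and checks the fallback read path against that baseline.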
            "waitReplication: " + e);
      } catch (TimeoutException e) {
        Assert.fail("unexpected TimeoutException during " +
            "waitReplication: " + e);
      }
      fsIn = fs.open(TEST_PATH);
      byte original[] = new byte[TEST_FILE_LENGTH];
      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      testFallbackImpl(fsIn, original);


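A variant of the same test showing the cleanup idiom: the stream, the filesystem, and the mini-cluster are each closed in a finally block, with null guards in case setup failed partway through.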
      }
      fsIn = fs.open(TEST_PATH);
      byte[] original = new byte[TEST_FILE_LENGTH];
      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      testFallbackImpl(fsIn, original);
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();

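From a zero-copy read test: before the file has been cached, a zero-copy read without fallback options is expected to fail with UnsupportedOperationException.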
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    byte[] original = DFSTestUtil.
        calculateFileContentsFromSeed(RANDOM_SEED, TEST_FILE_LENGTH);

    // Prior to caching, the file can't be read via zero-copy
    FSDataInputStream fsIn = fs.open(TEST_PATH);
    try {
      result = fsIn.read(null, TEST_FILE_LENGTH / 2,
          EnumSet.noneOf(ReadOption.class));
      Assert.fail("expected UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {

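The follow-up to the previous fragment: once the cache operation has finished, files opened afterwards should still get zero-copy reads, per the HDFS-6086 regression test noted in the comment.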
    }
    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
        BLOCK_SIZE), byteBufferToArray(result));
    // Test that files opened after the cache operation has finished
    // still get the benefits of zero-copy (regression test for HDFS-6086)
    FSDataInputStream fsIn2 = fs.open(TEST_PATH);
    try {
      result2 = fsIn2.read(null, TEST_FILE_LENGTH,
          EnumSet.noneOf(ReadOption.class));
    } catch (UnsupportedOperationException e) {
      Assert.fail("expected to be able to read cached file via zero-copy");

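Here the test verifies the opposite configuration: with client mmaps disabled, a zero-copy read is expected to fail with UnsupportedOperationException.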
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, TEST_PATH,
          TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
      fsIn = fs.open(TEST_PATH);
      try {
        fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
        Assert.fail("expected zero-copy read to fail when client mmaps " +
            "were disabled.");
      } catch (UnsupportedOperationException e) {

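This fragment exercises end-of-file behavior: after a successful one-byte zero-copy read, the stream is skipped to the last byte of the file and read again.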
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, TEST_PATH,
          TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
      fsIn = fs.open(TEST_PATH);
      ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      fsIn.releaseBuffer(buf);
      // Test EOF behavior
      IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
      buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));

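A test of reads near the 2 GB mmap boundary: a one-byte zero-copy read is verified and released, then the stream is seeked to offset 2147483640, eight bytes below 2^31.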
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
      DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
     
      fsIn = fs.open(TEST_PATH);
      buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(1, buf1.remaining());
      fsIn.releaseBuffer(buf1);
      buf1 = null;
      fsIn.seek(2147483640L);

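The companion fragment to the 2 GB boundary test: a second file with a 256 MB block size is opened and seeked to offset 2147483640, and the zero-copy read returns only 8 bytes, ending exactly at position 2147483648.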
      final Path TEST_PATH2 = new Path("/b");
      conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
      DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
          268435456L, (short)1, 0xA);
     
      fsIn2 = fs.open(TEST_PATH2);
      fsIn2.seek(2147483640L);
      buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(8, buf2.remaining());
      Assert.assertEquals(2147483648L, fsIn2.getPos());
      fsIn2.releaseBuffer(buf2);

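A block-recovery test fragment: a file is written and hsync'd, its first located block is fetched via fs.open(), and a recovery thread is set up against the first datanode.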
      Path path = new Path("/test");
      FSDataOutputStream out = fs.create(path);
      out.writeBytes("data");
      out.hsync();
     
      List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
      final LocatedBlock block = blocks.get(0);
      final DataNode dataNode = cluster.getDataNodes().get(0);
     
      final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
      Thread recoveryThread = new Thread() {

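From the HDFS RAID code: the HAR index file is opened with dfs.open() and parsed, then the corrupt blocks of the corresponding part-file are enumerated for repair.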
      harDirectory =
        harDirectory.substring(0, harDirectory.lastIndexOf(Path.SEPARATOR));
      Path indexFile = new Path(harDirectory + "/" + HarIndex.indexFileName);
      FileStatus indexStat = dfs.getFileStatus(indexFile);
      // Parse the HAR index file.
      HarIndex harIndex = new HarIndex(dfs.open(indexFile), indexStat.getLen());

      String uriPath = partFile.toUri().getPath();
      int numBlocksFixed = 0;
      List<LocatedBlock> corrupt =
        RaidDFSUtil.corruptBlocksInFile(dfs, uriPath, 0, partFileSize);
