Examples of hsync()
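
FSDataOutputStream.hsync() (part of the org.apache.hadoop.fs.Syncable contract) flushes the client-side buffer and asks the DataNodes to persist the written data to disk, much like a POSIX fsync; this is a stronger guarantee than hflush(), which only makes the data visible to new readers. The snippets below are call sites taken from HDFS test code. As a starting point, here is a minimal standalone sketch; it assumes a FileSystem reachable through the default configuration, and the class name and path are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.nio.charset.StandardCharsets;

public class HsyncExample {
  public static void main(String[] args) throws Exception {
    // Picks up fs.defaultFS from the configuration on the classpath
    // (an HDFS URI on a cluster, the local file system otherwise).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path path = new Path("/tmp/hsync-example.txt"); // illustrative path
    try (FSDataOutputStream out = fs.create(path, true /* overwrite */)) {
      out.write("hello, hsync".getBytes(StandardCharsets.UTF_8));
      // hflush() would only guarantee visibility to new readers;
      // hsync() additionally asks the DataNodes to sync the data to disk.
      out.hsync();
    }
  }
}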


Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
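In this fragment, hsync() is called right after a small write while a separately started thread deletes the file; the sync is expected to fail with a FileNotFoundException naming the deleted file.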

      deleteThread.start();

      try {
        // write data and sync to make sure a block is allocated.
        out.write(new byte[32], 0, 32);
        out.hsync();
        Assert.fail("Should have failed.");
      } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains(filePath.getName(), e);
      }
    } finally {

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
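Here data is written and hsync()'d around a secondary NameNode checkpoint, after which a fault injector is armed to throw an IOException during the merge step of the next checkpoint.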

      secondary = startSecondaryNameNode(conf);
      fos = fs.create(new Path("tmpfile0"));
      fos.write(new byte[] { 0, 1, 2, 3 });
      secondary.doCheckpoint();
      fos.write(new byte[] { 0, 1, 2, 3 });
      fos.hsync();

      // Cause merge to fail in next checkpoint.
      Mockito.doThrow(new IOException(
          "Injecting failure during merge"))
          .when(faultInjector).duringMerge();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
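Continuing the checkpoint fault-injection scenario: after the injector is reset, more data is written and hsync()'d, and the next checkpoint is expected to reload the image.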

      }
      Mockito.reset(faultInjector);
      // The error must be recorded, so next checkpoint will reload image.
      fos.write(new byte[] { 0, 1, 2, 3 });
      fos.hsync();
     
      assertTrue("Another checkpoint should have reloaded image",
          secondary.doCheckpoint());
    } finally {
      if (fs != null) {

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
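The same checkpoint setup as above, except the injected failure is triggered after the MD5 rename step rather than during the merge.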

      secondary = startSecondaryNameNode(conf);
      fos = fs.create(new Path("tmpfile0"));
      fos.write(new byte[] { 0, 1, 2, 3 });
      secondary.doCheckpoint();
      fos.write(new byte[] { 0, 1, 2, 3 });
      fos.hsync();

      // Cause merge to fail in next checkpoint.
      Mockito.doThrow(new IOException(
          "Injecting failure after MD5Rename"))
          .when(faultInjector).afterMD5Rename();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
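About 2 MB of data is written and hsync()'d, the wrapped DFSOutputStream is then aborted, and the NameNode is queried for the block locations of the file.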

    int count = 0;
    while (count < 2 * 1024 * 1024) {
      out.writeBytes("Data");
      count += 4;
    }
    out.hsync();
    // abort the original stream
    ((DFSOutputStream) out.getWrappedStream()).abort();

    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
        file.toString(), 0, count);

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
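A 1030-byte file is written and hsync()'d (but not closed) before the NameNode is restarted; the file is then reopened to verify its visible length.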

      DistributedFileSystem dfs = cluster
          .getFileSystem();
      FSDataOutputStream out = dfs.create(path);
      int fileLength = 1030;
      out.write(new byte[fileLength]);
      out.hsync();
      cluster.restartNameNode();
      cluster.waitActive();
      in = (HdfsDataInputStream) dfs.open(path, 1024);
      // Verify the length right after the NameNode restart; the DataNodes
      // will register immediately.

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
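A single byte is written, hsync()'d, and the stream closed; the first DataNode's metrics are then read to check the recorded flush count.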

      DistributedFileSystem fs = cluster.getFileSystem();

      Path testFile = new Path("/testFlushNanosMetric.txt");
      FSDataOutputStream fout = fs.create(testFile);
      fout.write(new byte[1]);
      fout.hsync();
      fout.close();
      List<DataNode> datanodes = cluster.getDataNodes();
      DataNode datanode = datanodes.get(0);
      MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
      // Expect two flushes, 1 for the flush that occurs after writing,

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
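A file is created with replication 2, written and hsync()'d, the replication is then dropped to 1, and the block manager is expected to report a single live replica (HDFS-3119).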

      final BlockManager bm = namesystem.getBlockManager();
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
      FSDataOutputStream out = fs.create(p, (short) 2);
      out.writeBytes("HDFS-3119: " + p);
      out.hsync();
      fs.setReplication(p, (short) 1);
      out.close();
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
      assertEquals("Expected only one live replica for the block", 1, bm
          .countNodes(block.getLocalBlock()).liveReplicas());

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
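A file is created with replication 2, written and hsync()'d, an additional DataNode is started, and the file's first block is then looked up along with the first DataNode (HDFS-3157).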

      final FSNamesystem namesystem = cluster.getNamesystem();
      FileSystem fs = cluster.getFileSystem();
      Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
      out = fs.create(testPath, (short) 2);
      out.writeBytes("HDFS-3157: " + testPath);
      out.hsync();
      cluster.startDataNodes(conf, 1, true, null, null, null);
      String bpid = namesystem.getBlockPoolId();
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
      Block block = blk.getLocalBlock();
      DataNode dn = cluster.getDataNodes().get(0);

Examples of org.apache.hadoop.fs.FSDataOutputStream.hsync()
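A short string is written and hsync()'d, after which the block locations of the still-open file are fetched and the first DataNode serving them is identified.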

      cluster.waitClusterUp();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path path = new Path("/test");
      FSDataOutputStream out = fs.create(path);
      out.writeBytes("data");
      out.hsync();
     
      List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
      final LocatedBlock block = blocks.get(0);
      final DataNode dataNode = cluster.getDataNodes().get(0);
     