Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()
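
hflush() flushes the client's buffered data out to the DataNodes in the write pipeline and makes it visible to new readers, but unlike hsync() it does not guarantee the bytes have been persisted to disk. A minimal usage sketch (the path and payload are illustrative, assuming the usual org.apache.hadoop.conf and org.apache.hadoop.fs imports):

    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/hflush-demo"));
    out.writeBytes("visible to readers after hflush\n");
    out.hflush();   // data now readable by new readers; the file is still open
    // ... keep writing, or close to finalize the file
    out.close();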


    // Start writing a file but do not close it
    FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
    for (int i = 0; i < 1024; i++) {
      fout.write(123);
    }
    fout.hflush();

    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
    LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
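
After the hflush() above, the 1024 written bytes are visible to new readers even though the file is still open. A hedged verification sketch, reusing the fs and src from the excerpt:

    // Hedged check (assumed, not part of the original excerpt): a fresh
    // reader should see all 1024 flushed bytes before the writer closes.
    FSDataInputStream in = fs.open(new Path(src));
    byte[] buf = new byte[1024];
    in.readFully(0, buf);   // would throw EOFException if bytes were missing
    in.close();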


    FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);

    // Write a full block to all 3 DNs (2 x 256 bytes = the 512-byte block size).
    out.write(bytes);
    out.write(bytes);
    out.hflush();

    // Stop two DNs to put them into the client's exclude list.
    DataNodeProperties two = cluster.stopDataNode(2);
    DataNodeProperties one = cluster.stopDataNode(1);

    // Write another block.
    // At this point, two nodes are already in the exclude list.
    out.write(bytes);
    out.write(bytes);
    out.hflush();

    // Bring back the stopped DNs; they will only be forgiven
    // after this previous block write completes.
    Assert.assertTrue(cluster.restartDataNode(one, true));
    Assert.assertTrue(cluster.restartDataNode(two, true));

    try {
      // Attempt to write another block; this should still succeed
      // because the two previously stopped DNs should have been forgiven
      // by now, while the last good DN gets added to the exclude list this time.
      out.write(bytes);
      out.hflush();
      out.close();
    } catch (Exception e) {
      fail("Excluded DataNodes should be forgiven after a while and " +
           "not cause file writing exception of: '" + e.getMessage() + "'");
    }
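
Whether the stopped DataNodes are forgiven in time is governed by the client's exclude-node cache expiry. A hedged configuration sketch, assuming the Hadoop 2.x property name:

    // Assumed setup (hedged): shorten the exclude-node cache expiry so the
    // stopped DataNodes are forgiven quickly enough for the final write.
    Configuration conf = new HdfsConfiguration();
    conf.setLong(
        "dfs.client.write.exclude.nodes.cache.expiry.interval.millis", 2000);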

      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .format(true).build();
      FileSystem fs = cluster.getFileSystem();
      fos = fs.create(new Path("tmpfile"));
      fos.write(new byte[] { 0, 1, 2, 3 });
      fos.hflush();
      assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease());

      secondary = startSecondaryNameNode(conf);
      assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
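
hflush() leaves the file open, so the writer's lease stays held and the count above remains 1. A hedged follow-up, assuming the same fos and cluster:

      // Hedged follow-up (assumed): closing the stream completes the file
      // and releases the lease.
      fos.close();
      assertEquals(0, cluster.getNamesystem().getLeaseManager().countLease());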

        for (Path path : testPaths) {
          FSDataOutputStream out = cluster.getFileSystem().create(path, (short)2);
          streams.add(out);

          out.writeBytes("old gs data\n");
          out.hflush();
        }
       
       
        // Shutdown one of the nodes in the pipeline
        DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);

        for (int i = 0; i < streams.size(); i++) {
          Path path = testPaths.get(i);
          FSDataOutputStream out = streams.get(i);

          out.writeBytes("new gs data\n");
          out.hflush();

          // Set replication so that only one node is necessary for this block,
          // and close it.
          cluster.getFileSystem().setReplication(path, (short)1);
          out.close();
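        } // (hedged) end of the per-stream loop, elided by this excerpt

        // Hedged verification sketch (assumed, plain java.io): read each
        // file back and confirm both the old- and new-genstamp lines.
        for (Path p : testPaths) {
          BufferedReader reader = new BufferedReader(
              new InputStreamReader(cluster.getFileSystem().open(p)));
          assertEquals("old gs data", reader.readLine());
          assertEquals("new gs data", reader.readLine());
          reader.close();
        }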

    String filePath = "/hard-lease-recovery-test";
    byte[] bytes = "foo-bar-baz".getBytes();
    DFSClientAdapter.stopLeaseRenewer(filesystem);
    FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
    leaseRecoveryPath.write(bytes);
    leaseRecoveryPath.hflush();
    // Set the hard lease timeout to 1 second.
    cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
    // Wait for lease recovery to complete.
    LocatedBlocks locatedBlocks;
    do {
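      // Hedged completion (assumed; the loop body is elided by this excerpt):
      // poll the NameNode until the last block is no longer under
      // construction, i.e. hard lease recovery has finished.
      Thread.sleep(1000);
      locatedBlocks = DFSClientAdapter.getDFSClient(filesystem)
          .getNamenode().getBlockLocations(filePath, 0, Long.MAX_VALUE);
    } while (locatedBlocks.isUnderConstruction());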
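
The excerpt below assumes a stream fos that was opened on /test1/file before the path was replaced, so the stream's fileId no longer matches what the NameNode holds for that path. One hypothetical way to reach that state (names and steps are assumptions, not the original test's setup):

    // Hypothetical setup (assumed): open a stream, then move the file and
    // create a fresh one at the old path, giving the path a new fileId.
    FSDataOutputStream fos = fs.create(new Path("/test1/file"));
    fos.write(data, 0, data.length);
    fos.hflush();
    fs.rename(new Path("/test1/file"), new Path("/test1/file-moved"));
    fs.create(new Path("/test1/file")).close();   // new inode, new fileId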

    // Adding a new block should fail, since /test1/file now has a different fileId.
    try {
      fos.write(data, 0, data.length);
      // Make sure the addBlock() request reaches the NN immediately.
      fos.hflush();

      fail("Write should fail after rename");
    } catch (Exception e) {
      /* Ignore */
    } finally {

    String filePath = "/hard-lease-recovery-test";
    byte[] bytes = "foo-bar-baz".getBytes();
    DFSClientAdapter.stopLeaseRenewer(dfs);
    FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
    leaseRecoveryPath.write(bytes);
    leaseRecoveryPath.hflush();
    // Set the hard lease timeout to 1 second.
    cluster.setLeasePeriod(60 * 1000, 1000);
    // Wait for lease recovery to complete.
    LocatedBlocks locatedBlocks;
    do {
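      // Hedged completion: the same polling loop sketched after the earlier
      // hard-lease-recovery example applies here, with dfs as the filesystem.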
