Examples of hflush()
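
Before the collected fragments below, here is a minimal, self-contained sketch of the basic call pattern. It assumes whatever filesystem fs.defaultFS points at is reachable, and the path name is hypothetical; hflush() pushes the client's buffered bytes out to the datanode pipeline so they become visible to new readers, but unlike hsync() it does not guarantee the data has reached disk.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);          // uses fs.defaultFS
        Path path = new Path("/tmp/hflush-demo");      // hypothetical path

        try (FSDataOutputStream out = fs.create(path, true)) {
          out.writeBytes("first record\n");
          // Push buffered bytes to the datanodes; a reader opening the file
          // now can already see this data while it is still open for write.
          out.hflush();

          out.writeBytes("second record\n");
          out.hflush();
        } // close() finalizes the file
      }
    }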


Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    AppendTestUtil.LOG.info("size=" + size);
    stm.write(buffer, 0, size);

    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();

    if (triggerLeaseRenewerInterrupt) {
      AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
      dfs.dfs.getLeaseRenewer().interruptAndJoin();
    }

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    AppendTestUtil.LOG.info("size=" + size);
    stm.write(buffer, 0, size);

    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
   
    // kill the lease renewal thread
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.getLeaseRenewer().interruptAndJoin();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    AppendTestUtil.LOG.info("size=" + size);
    stm.write(buffer, 0, size);

    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.getLeaseRenewer().interruptAndJoin();

    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery on next attempt to write-for-open.
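
The fragment above cuts off before the soft limit is actually shortened. A minimal sketch of how a test might do that, assuming the MiniDFSCluster test utility and its setLeasePeriod helper from the HDFS test tree (the constants here are hypothetical):

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical helper: shorten the namenode's lease limits so lease recovery
    // is triggered quickly once another client tries to write/open the file.
    static void useShortLeasePeriods(MiniDFSCluster cluster) {
      final long softLimitMs = 1000;   // 1-second soft limit, as the comment above describes
      final long hardLimitMs = 2000;   // hard limit kept short as well for the test
      cluster.setLeasePeriod(softLimitMs, hardLimitMs);
    }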

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    assertFalse("original lease holder should not be the NN",
        originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER));

    // hflush file
    AppendTestUtil.LOG.info("hflush");
    stm.hflush();
   
    // check visible length
    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
    Assert.assertEquals(size, in.getVisibleLength());
    in.close();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    assertEquals(size, locatedBlocks.getFileLength());

    // make sure that the client can't write data anymore.
    try {
      stm.write('b');
      stm.hflush();
      fail("Should not be able to flush after we've lost the lease");
    } catch (IOException e) {
      LOG.info("Expceted exception on write/hflush", e);
    }
   

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    //b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out = fs.append(p);
    final int len2 = (int)BLOCK_SIZE/2;
    AppendTestUtil.write(out, len1, len2);
    out.hflush();
   
    //c. Rename file to file.new.
    final Path pnew = new Path(p + ".new");
    assertTrue(fs.rename(p, pnew));

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    // append to file
    stm = fs.append(p);
    // Append to a partial CRC chunk
    stm.write(fileContents, 1, 1);
    stm.hflush();
    // The partial CRC chunk is not full yet; close the file anyway
    stm.close();
    System.out.println("Append 1 byte and closed the file " + p);

    // write the remainder of the file

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    assertEquals(2, stm.getPos());

    // append to a partial CRC chunk
    stm.write(fileContents, 2, 1);
    // The partial chunk is not full yet; force a packet to be sent to the DN
    stm.hflush();
    System.out.println("Append and flush 1 byte");
    // The partial chunk is still not full; force another packet to be sent to the DN
    stm.write(fileContents, 3, 2);
    stm.hflush();
    System.out.println("Append and flush 2 byte");

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

    // The partial chunk is not full yet; force a packet to be sent to the DN
    stm.hflush();
    System.out.println("Append and flush 1 byte");
    // The partial chunk is still not full; force another packet to be sent to the DN
    stm.write(fileContents, 3, 2);
    stm.hflush();
    System.out.println("Append and flush 2 byte");

    // fill up the partial chunk and close the file
    stm.write(fileContents, 5, fileLen-5);
    stm.close();

Examples of org.apache.hadoop.fs.FSDataOutputStream.hflush()

      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
      FileSystem fs = cluster.getFileSystem();
      // Create the file with a 4096-byte block size so the write spans multiple blocks
      stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
      stream.write(DATA_BEFORE_RESTART);
      stream.hflush();
     
      // Wait for at least a few blocks to get through
      while (len <= BLOCK_SIZE) {
        FileStatus status = fs.getFileStatus(FILE_PATH);
        len = status.getLen();
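
As a reader-side complement to the writer fragments above, a hedged sketch of the polling loop as a reusable helper (names are hypothetical). Note that FileStatus#getLen() advances as blocks complete; for bytes hflushed into the block still under construction, HdfsDataInputStream#getVisibleLength(), as in the fourth fragment, is the more precise check.

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical helper mirroring the loop above: wait until the reported
    // length of a file being written reaches expectedLen.
    static void waitForLength(FileSystem fs, Path path, long expectedLen)
        throws Exception {
      long len = 0;
      while (len < expectedLen) {
        FileStatus status = fs.getFileStatus(path);
        len = status.getLen();
        Thread.sleep(100);   // simple poll, as in the MiniDFSCluster test above
      }
    }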