Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer
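DFSOutputStream is the client-side output stream that DFSClient hands back for creates and appends, and DataStreamer is its inner worker thread that packages written bytes into packets and streams them down the datanode write pipeline. Most of the excerpts below reach the DFSOutputStream by unwrapping the FSDataOutputStream returned from FileSystem.create(). A minimal sketch of that pattern, assuming a MiniDFSCluster test setup in the style of the excerpts (the /example.txt path and the 1 KB payload are illustrative, not taken from any of them):

      Configuration conf = new Configuration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
      try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();

        // FileSystem.create returns a generic FSDataOutputStream wrapper; the
        // HDFS-specific stream is reachable via getWrappedStream().
        FSDataOutputStream out = fs.create(new Path("/example.txt"));
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();

        out.write(new byte[1024]);
        out.hflush();  // push the buffered packets through the DataStreamer pipeline

        out.close();
      } finally {
        cluster.shutdown();
      }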


      // Create file1 (overwrite=true, 1 KB buffer, replication rep, 4 KB blocks),
      // write 1 KB of test data, and sync it out to the datanode pipeline.
      stm = fs1.create(file1, true, 1024, rep, 4096);
      AppendTestUtil.write(stm, 0, 1024);
      stm.sync();
      // Make the client give up its lease on the file (test helper).
      loseLeases(fs1);

      // Unwrap the client-side DFSOutputStream and abort it via the test hook,
      // simulating a writer that dies without closing the file.
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();

      // close the primary DN
      DataNodeProperties badDN = cluster.stopDataNode(0);

      // Truncate the block on the primary DN
View Full Code Here


      stm = fs1.create(file1, true, 1024, rep, 4096);
      AppendTestUtil.write(stm, 0, 1024);
      stm.sync();
      loseLeases(fs1);

      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();

      // Stop both datanodes; after stopping index 0, the datanode that was
      // index 1 shifts down to index 0.
      DataNodeProperties badDN = cluster.stopDataNode(0);
      DataNodeProperties badDN2 = cluster.stopDataNode(0);
      assertNotNull(badDN);
View Full Code Here
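The two excerpts above simulate a writer dying mid-write: the data is synced, the client's lease is dropped, the DFSOutputStream is aborted through a test hook, and the datanodes at the head of the pipeline are stopped. Before another client can read or append the file, the lease normally has to be reclaimed; a hedged sketch of doing that with DistributedFileSystem.recoverLease (the fs2 handle and the retry loop are illustrative assumptions, not part of the excerpts):

      // A second client handle to the same cluster reclaims the dead writer's lease.
      DistributedFileSystem fs2 = (DistributedFileSystem) cluster.getFileSystem();
      boolean recovered = fs2.recoverLease(file1);
      while (!recovered) {
        Thread.sleep(1000);  // lease and block recovery complete asynchronously
        recovered = fs2.recoverLease(file1);
      }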

    DFSClient dfsclient = ((DistributedFileSystem) fs).dfs;

    String src = "/testNameNodeFingerprintSent1.txt";
    // Path f = new Path(src);

    // Create the file through DFSClient directly so the raw DFSOutputStream is
    // available, then wrap it in an FSDataOutputStream for writing.
    DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
        (short) 1, 512L);

    FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

    for (int i = 0; i < 512; i++) {
View Full Code Here

  /** This optional operation is not yet supported. */
  public FSDataOutputStream append(Path f, int bufferSize,
      Progressable progress) throws IOException {

    DFSOutputStream op = (DFSOutputStream)dfs.append(getPathName(f), bufferSize, progress);
    return new FSDataOutputStream(op, statistics, op.getInitialLen());
  }
View Full Code Here
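This DistributedFileSystem.append override wraps the DFSOutputStream returned by DFSClient.append in an FSDataOutputStream, seeding the stream position with getInitialLen() so getPos() starts at the file's existing length. From the caller's side it is just the generic FileSystem API; a small sketch, assuming fs is a DistributedFileSystem on a cluster with append enabled and /existing.txt already present (all of which are assumptions, not from the excerpt):

    Path p = new Path("/existing.txt");
    FSDataOutputStream appendStream = fs.append(p);  // delegates to the override above
    long startingOffset = appendStream.getPos();     // equals DFSOutputStream.getInitialLen()
    appendStream.write("more data".getBytes());
    appendStream.close();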

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   
    try {
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      DFSOutputStream out = null;
      try {
        // Create a file and make sure a block is allocated for it.
        out = (DFSOutputStream)(fs.create(file).
            getWrappedStream());
        out.write(1);
        out.hflush();
       
        // Create a snapshot that includes the file.
        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
            new Path("/"), "s1");
       
View Full Code Here
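Here the test writes a single byte and hflushes it so a block is allocated, then snapshots the root while the file is still open for write. SnapshotTestHelper.createSnapshot is a test convenience; with the public DistributedFileSystem API the equivalent is roughly the following sketch (the path and snapshot name mirror the excerpt, but this is not the helper's exact implementation):

        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        dfs.allowSnapshot(new Path("/"));  // mark the directory snapshottable (superuser only)
        Path snapshotRoot = dfs.createSnapshot(new Path("/"), "s1");
        // The snapshot is now readable under /.snapshot/s1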

      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // Open a file and get the head of the pipeline
      Path testFile = new Path("/testRoundTripAckMetric.txt");
      FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
      DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
      // Slow down the writes to catch the write pipeline
      dout.setChunksPerPacket(5);
      dout.setArtificialSlowdown(3000);
      fsout.write(new byte[10000]);
      DatanodeInfo[] pipeline = null;
      int count = 0;
      while (pipeline == null && count < 5) {
        pipeline = dout.getPipeline();
        System.out.println("Waiting for pipeline to be created.");
        Thread.sleep(1000);
        count++;
      }
      // Get the head node that should be receiving downstream acks
View Full Code Here
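setChunksPerPacket and setArtificialSlowdown are test hooks that shrink the packets and throttle the DataStreamer so the write pipeline stays observable, and getPipeline() returns null until the DataStreamer has actually set the pipeline up, which is why the excerpt polls for it. A short continuation sketch of using the result (the assertion message is illustrative, not from the excerpt):

      assertNotNull("pipeline was never created", pipeline);
      // pipeline[0] is the first datanode in the write pipeline: the node the
      // client writes to directly and the one that receives the downstream acks.
      DatanodeInfo headNode = pipeline[0];
      System.out.println("Pipeline head: " + headNode);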


          }
        }
      }
    });

    DFSOutputStream dos = (DFSOutputStream) dfsclient.create(src, true,
        (short) 1, 512L);

    FSDataOutputStream a_out = new FSDataOutputStream(dos); // fs.create(f);

    // Writing two blocks.
View Full Code Here
