Examples of HdfsDataOutputStream


Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  /**
   * Append {@code length} random bytes to {@code file} and return the
   * still-open stream, so the file stays under construction.
   */
  private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
      throws IOException {
    byte[] toAppend = new byte[length];
    Random random = new Random();
    random.nextBytes(toAppend);
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out;
  }
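
A minimal usage sketch for the helper above (hypothetical; assumes a DistributedFileSystem bound to hdfs and an existing file, as in the tests below):

  // Hypothetical caller: the returned stream is still open, so the file
  // remains under construction until close().
  HdfsDataOutputStream out = appendFileWithoutClosing(file, 1024);
  // Flush to the datanodes and ask the NameNode to record the new length.
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  out.close(); // completes the last block and releases the lease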

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  public void testSnapshotWhileAppending() throws Exception {
    Path file = new Path(dir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
   
    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot should be stored in
    // s0's deleted list, with size BLOCKSIZE * 2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    DirectoryDiff last = dirNode.getDiffs().getLast();
    Snapshot s0 = last.snapshot;
   
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    // re-check nodeInDeleted_S0
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
   
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    last = dirNode.getDiffs().getLast();
    Snapshot s1 = last.snapshot;
    assertTrue(fileNode instanceof INodeFileWithSnapshot);
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
   
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
   
    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  }
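
A hedged follow-up sketch (hypothetical; uses the SnapshotTestHelper.getSnapshotPath helper seen in the next example): reading the file through a snapshot path should report the length frozen at snapshot time, regardless of appends made afterwards.

  // Hypothetical check inside the test above: s1 froze the file at
  // BLOCKSIZE * 3 bytes even though the live file kept growing.
  Path fileInS1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", file.getName());
  assertEquals(BLOCKSIZE * 3, hdfs.getFileStatus(fileInS1).getLen());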

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

    SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
    final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root,
        "s2", file.getName());
   
    // append data to file without closing
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    status = hdfs.getFileStatus(fileInSnapshot2);
    // the size of the snapshot file should be BLOCKSIZE * 2 - 1
    assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
    // the size of the file should be (3 * BLOCKSIZE - 1)
    status = hdfs.getFileStatus(file);
    assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
   
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
    assertFalse(blocks.isUnderConstruction());
    assertTrue(blocks.isLastBlockComplete());
    blockList = blocks.getLocatedBlocks();
   
    // the snapshot file should span 2 blocks, BLOCKSIZE * 2 - 1 bytes in total
    assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
    assertEquals(2, blockList.size());
   
    // check the last block
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
   
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        fileInSnapshot2.toString(), BLOCKSIZE, 0);
    blockList = blocks.getLocatedBlocks();
    assertEquals(1, blockList.size());
   
    // check blocks for file being written
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        file.toString(), 0, Long.MAX_VALUE);
    blockList = blocks.getLocatedBlocks();
    assertEquals(3, blockList.size());
    assertTrue(blocks.isUnderConstruction());
    assertFalse(blocks.isLastBlockComplete());
   
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
    out.close();
  }
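
A reader-side sketch (hypothetical; reuses fileInSnapshot2 from the fragment above): opening the snapshot path serves only the BLOCKSIZE * 2 - 1 bytes that existed when s2 was taken, even though the live file is longer.

  // Hypothetical read of the frozen snapshot view.
  FSDataInputStream in = hdfs.open(fileInSnapshot2);
  byte[] buf = new byte[BLOCKSIZE * 2 - 1];
  IOUtils.readFully(in, buf, 0, buf.length); // org.apache.hadoop.io.IOUtils
  in.close();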

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  /**
   * Append to an existing file. The raw DFSOutputStream is wrapped in an
   * HdfsDataOutputStream whose position starts at the current file length.
   */
  public HdfsDataOutputStream append(final String src, final int buffersize,
      final Progressable progress, final FileSystem.Statistics statistics
      ) throws IOException {
    final DFSOutputStream out = append(src, buffersize, progress);
    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
  }
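
A caller-side sketch (hypothetical; fs is assumed to be a DistributedFileSystem): FileSystem.append() ultimately returns this wrapper, so callers can downcast to reach the HDFS-only operations.

  // Hypothetical usage; getPos() on the returned stream starts at the
  // old file length thanks to the getInitialLen() argument above.
  byte[] payload = "event\n".getBytes();
  HdfsDataOutputStream out = (HdfsDataOutputStream) fs.append(new Path("/data/log"));
  out.write(payload);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); // make the new length durable
  out.close();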

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  @Override
  public HdfsDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      int bytesPerChecksum, boolean createParent) throws IOException {
    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
        absolutePermission, createFlag, createParent, replication, blockSize,
        progress, bufferSize, bytesPerChecksum), getStatistics());
  }
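
A hedged sketch of the FileContext call that is routed to createInternal() above (names and paths illustrative):

  // Hypothetical FileContext usage; AbstractFileSystem implementations
  // such as Hdfs receive it as a createInternal() call.
  FileContext fc = FileContext.getFileContext(new Configuration());
  FSDataOutputStream out = fc.create(new Path("/tmp/f"),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE));
  out.write("hello".getBytes());
  out.close();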

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

    final EnumSet<CreateFlag> cflags = overwrite
        ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
        : EnumSet.of(CreateFlag.CREATE);
    final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
        replication, blockSize, progress, bufferSize);
    return new HdfsDataOutputStream(out, statistics);
  }
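
A hedged sketch of the public entry point feeding this fragment (assuming the standard FileSystem.create overloads): the boolean overwrite argument selects between the two flag sets.

  // Hypothetical: overwrite == true maps to EnumSet.of(CREATE, OVERWRITE),
  // overwrite == false to EnumSet.of(CREATE).
  FSDataOutputStream out = fs.create(new Path("/tmp/f"), true /* overwrite */);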

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

  protected HdfsDataOutputStream primitiveCreate(Path f,
      FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
      short replication, long blockSize, Progressable progress,
      int bytesPerChecksum) throws IOException {
    // creating a file is a write operation, so count it as one
    statistics.incrementWriteOps(1);
    return new HdfsDataOutputStream(dfs.primitiveCreate(getPathName(f),
        absolutePermission, flag, true, replication, blockSize,
        progress, bufferSize, bytesPerChecksum), statistics);
  }
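
A hedged sketch of where those counters surface (assuming the standard FileSystem statistics API):

  // Hypothetical: the per-scheme statistics accumulate the write ops
  // incremented above.
  FileSystem.Statistics stats =
      FileSystem.getStatistics("hdfs", DistributedFileSystem.class);
  System.out.println("write ops: " + stats.getWriteOps());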

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      long blockSize, Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);
    // OVERWRITE implies CREATE, so normalize the flag set before the RPC
    if (flag.contains(CreateFlag.OVERWRITE)) {
      flag.add(CreateFlag.CREATE);
    }
    return new HdfsDataOutputStream(dfs.create(getPathName(f), permission, flag,
        false, replication, blockSize, progress, bufferSize), statistics);
  }
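
This fragment appears to be the non-recursive create path, since createParent is passed as false. A hedged caller-side sketch (deprecated FileSystem.createNonRecursive overload; values illustrative):

  // Hypothetical: unlike create(), this fails if the parent directory
  // does not already exist.
  FSDataOutputStream out = fs.createNonRecursive(new Path("/exists/f"),
      FsPermission.getDefault(), true /* overwrite */, 4096, (short) 3,
      128L * 1024 * 1024, null);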

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      fs = cluster.getFileSystem();
      final int nnport = cluster.getNameNodePort();

      // create a new file.
      Path file1 = new Path("/filestatus.dat");
      HdfsDataOutputStream stm = create(fs, file1, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file1);
      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
          stm.getCurrentBlockReplication());

      // write two full blocks.
      writeFile(stm, numBlocks * blockSize);
      stm.hflush();
      assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
          stm.getCurrentBlockReplication());

      // rename file while keeping it open.
      Path fileRenamed = new Path("/filestatusRenamed.dat");
      fs.rename(file1, fileRenamed);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file1 + " to " +
                         fileRenamed);
      file1 = fileRenamed;

      // create another new file.
      //
      Path file2 = new Path("/filestatus2.dat");
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file2);

      // create yet another new file with full path name.
      // rename it while open
      //
      Path file3 = new Path("/user/home/fullpath.dat");
      FSDataOutputStream stm3 = createFile(fs, file3, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file3);
      Path file4 = new Path("/user/home/fullpath4.dat");
      FSDataOutputStream stm4 = createFile(fs, file4, 1);
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Created file " + file4);

      fs.mkdirs(new Path("/bin"));
      fs.rename(new Path("/user/home"), new Path("/bin"));
      Path file3new = new Path("/bin/home/fullpath.dat");
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file3 + " to " +
                         file3new);
      Path file4new = new Path("/bin/home/fullpath4.dat");
      System.out.println("testFileCreationNamenodeRestart: "
                         + "Renamed file " + file4 + " to " +
                         file4new);

      // restart cluster with the same namenode port as before.
      // This ensures that leases are persisted in fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(2*MAX_IDLE_TIME);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                .format(false)
                                                .build();
      cluster.waitActive();

      // restart cluster yet again. This triggers the code to read in
      // persistent leases from fsimage.
      cluster.shutdown();
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
      }
      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                .format(false)
                                                .build();
      cluster.waitActive();
      fs = cluster.getFileSystem();

      // instruct the dfsclient to use a new filename when it requests
      // new blocks for files that were renamed.
      DFSOutputStream dfstream = (DFSOutputStream)
                                                 (stm.getWrappedStream());
      dfstream.setTestFilename(file1.toString());
      dfstream = (DFSOutputStream) (stm3.getWrappedStream());
      dfstream.setTestFilename(file3new.toString());
      dfstream = (DFSOutputStream) (stm4.getWrappedStream());
      dfstream.setTestFilename(file4new.toString());

      // write 1 byte to file.  This should succeed because the
      // namenode should have persisted leases.
      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
      stm.write(buffer);
      stm.close();
      stm2.write(buffer);
      stm2.close();
      stm3.close();
      stm4.close();
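
A hedged aside on the flush calls used above (Syncable semantics): hflush() pushes buffered data to the datanode pipeline so new readers can see it, while hsync() with UPDATE_LENGTH additionally syncs to disk and updates the length recorded on the NameNode.

  // Hypothetical contrast on an open HdfsDataOutputStream like stm:
  stm.hflush();                                  // visible to new readers
  stm.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); // durable + length updated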


Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream

      dfs = cluster.getFileSystem();

      // create a new file.
      final String f = DIR + "foo";
      final Path fpath = new Path(f);
      HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
      out.write("something".getBytes());
      out.hflush();
      int actualRepl = out.getCurrentBlockReplication();
      assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
                 actualRepl == DATANODE_NUM);

      // set the soft and hard limit to be 1 second so that the
      // namenode triggers lease recovery
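
A hedged sketch of how the truncated setup usually continues (setLeasePeriod is a MiniDFSCluster test hook; the one-second values are illustrative):

  // Hypothetical: shrink both lease limits so the NameNode starts lease
  // recovery almost immediately after the client goes quiet.
  cluster.setLeasePeriod(1000L, 1000L);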